[ { "cuda": "\n#include \n\n#include \n\nnamespace at { namespace cuda {\n\n/**\n Computes ceil(a / b)\n*/\ntemplate \n__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {\n return (a + b - 1) / b;\n}\n\nnamespace {\n\n// Threads per block for our apply kernel\n// FIXME: use occupancy calculator instead\nconstexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;\nconstexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;\n\ntemplate \ninline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {\n if (curDevice == -1) return false;\n uint64_t numel_per_thread = static_cast(max_threads_per_block) * static_cast(step);\n uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);\n uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];\n if (numBlocks > maxGridX)\n numBlocks = maxGridX;\n grid = dim3(numBlocks);\n return true;\n}\n\nconstexpr int getApplyBlocksPerSM() {\n return AT_APPLY_BLOCKS_PER_SM;\n}\n\nconstexpr int getApplyBlockSize() {\n return AT_APPLY_THREADS_PER_BLOCK;\n}\n\ninline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {\n return dim3(max_threads_per_block);\n}\n\n}\n}} // namespace at::cuda\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \n\nnamespace at { namespace hip {\n\n/**\n Computes ceil(a / b)\n*/\ntemplate \n__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {\n return (a + b - 1) / b;\n}\n\nnamespace {\n\n// Threads per block for our apply kernel\n// FIXME: use occupancy calculator instead\nconstexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;\nconstexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;\n\ntemplate \ninline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {\n if (curDevice == -1) return false;\n uint64_t numel_per_thread = static_cast(max_threads_per_block) * static_cast(step);\n uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);\n uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];\n if (numBlocks > maxGridX)\n numBlocks = maxGridX;\n grid = dim3(numBlocks);\n return true;\n}\n\nconstexpr int getApplyBlocksPerSM() {\n return AT_APPLY_BLOCKS_PER_SM;\n}\n\nconstexpr int getApplyBlockSize() {\n return AT_APPLY_THREADS_PER_BLOCK;\n}\n\ninline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {\n return dim3(max_threads_per_block);\n}\n\n}\n}} // namespace at::cuda\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\ntemplate \n__global__ static void compute_cuda_kernel(\n index_t* repeat_ptr,\n int64_t* cumsum_ptr,\n index_t* result_ptr,\n int64_t size,\n int64_t result_size) {\n CUDA_KERNEL_ASSERT(result_size == cumsum_ptr[size - 1]);\n int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;\n int64_t stride = (blockDim.x * gridDim.x) / C10_WARP_SIZE;\n int warp_id = idx / C10_WARP_SIZE;\n int tid_in_warp = idx % C10_WARP_SIZE;\n for (int64_t i = warp_id; i < size; i += stride) {\n int64_t end = cumsum_ptr[i];\n index_t repeat = repeat_ptr[i];\n CUDA_KERNEL_ASSERT(repeat >= 0);\n int64_t start = end - repeat;\n for (int64_t j = start + tid_in_warp; j < end; j += C10_WARP_SIZE) {\n result_ptr[j] = i;\n }\n }\n}\n\ntemplate \nstatic void compute_cuda(\n index_t* repeat_ptr,\n int64_t* cumsum_ptr,\n index_t* 
result_ptr,\n int64_t size,\n int64_t result_size) {\n int64_t block = 512;\n int64_t warps_per_block = block / at::cuda::warp_size();\n int64_t grid =\n std::min((size + warps_per_block - 1) / warps_per_block, 2048L);\n\n compute_cuda_kernel<<>>(\n repeat_ptr, cumsum_ptr, result_ptr, size, result_size);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\nnamespace at::native {\n\nTensor repeat_interleave_cuda(\n const Tensor& repeat,\n c10::optional output_size) {\n Tensor output;\n AT_DISPATCH_INDEX_TYPES(\n repeat.scalar_type(), \"repeat_interleave_cuda\", [&]() {\n output = repeat_interleave_common>(\n repeat, output_size);\n });\n return output;\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\ntemplate \n__global__ static void compute_hip_kernel(\n index_t* repeat_ptr,\n int64_t* cumsum_ptr,\n index_t* result_ptr,\n int64_t size,\n int64_t result_size) {\n CUDA_KERNEL_ASSERT(result_size == cumsum_ptr[size - 1]);\n int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;\n int64_t stride = (blockDim.x * gridDim.x) / C10_WARP_SIZE;\n int warp_id = idx / C10_WARP_SIZE;\n int tid_in_warp = idx % C10_WARP_SIZE;\n for (int64_t i = warp_id; i < size; i += stride) {\n int64_t end = cumsum_ptr[i];\n index_t repeat = repeat_ptr[i];\n CUDA_KERNEL_ASSERT(repeat >= 0);\n int64_t start = end - repeat;\n for (int64_t j = start + tid_in_warp; j < end; j += C10_WARP_SIZE) {\n result_ptr[j] = i;\n }\n }\n}\n\ntemplate \nstatic void compute_hip(\n index_t* repeat_ptr,\n int64_t* cumsum_ptr,\n index_t* result_ptr,\n int64_t size,\n int64_t result_size) {\n int64_t block = 512;\n int64_t warps_per_block = block / at::cuda::warp_size();\n int64_t grid =\n std::min((size + warps_per_block - 1) / warps_per_block, 2048L);\n\n hipLaunchKernelGGL(( compute_hip_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStream(), \n repeat_ptr, cumsum_ptr, result_ptr, size, result_size);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\nnamespace at::native {\n\nTensor repeat_interleave_hip(\n const Tensor& repeat,\n c10::optional output_size) {\n Tensor output;\n AT_DISPATCH_INDEX_TYPES(\n repeat.scalar_type(), \"repeat_interleave_hip\", [&]() {\n output = repeat_interleave_common>(\n repeat, output_size);\n });\n return output;\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k0_name[] = \"scaled_modified_bessel_k0_forward\";\n\n void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k0_cuda\", [&]() {\n jitted_gpu_kernel(iterator, scaled_modified_bessel_k0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k0_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return scaled_modified_bessel_k0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda);\n} // namespace 
at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char scaled_modified_bessel_k0_name[] = \"scaled_modified_bessel_k0_forward\";\n\n void scaled_modified_bessel_k0_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k0_hip\", [&]() {\n jitted_gpu_kernel(iterator, scaled_modified_bessel_k0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k0_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return scaled_modified_bessel_k0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k1_name[] = \"scaled_modified_bessel_k1_forward\";\n\n void scaled_modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k1_cuda\", [&]() {\n jitted_gpu_kernel(iterator, scaled_modified_bessel_k1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k1_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return scaled_modified_bessel_k1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char scaled_modified_bessel_k1_name[] = \"scaled_modified_bessel_k1_forward\";\n\n void scaled_modified_bessel_k1_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k1_hip\", [&]() {\n jitted_gpu_kernel(iterator, scaled_modified_bessel_k1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"scaled_modified_bessel_k1_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return scaled_modified_bessel_k1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_t_name[] = \"shifted_chebyshev_polynomial_t_forward\";\n\n void shifted_chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_t_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_t_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_t_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_t_forward(x, n);\n });\n });\n#endif\n } // shifted_chebyshev_polynomial_t_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(shifted_chebyshev_polynomial_t_stub, &shifted_chebyshev_polynomial_t_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_t_name[] = \"shifted_chebyshev_polynomial_t_forward\";\n\n void shifted_chebyshev_polynomial_t_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_t_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_t_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_t_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_t_forward(x, n);\n });\n });\n#endif\n } // shifted_chebyshev_polynomial_t_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(shifted_chebyshev_polynomial_t_stub, &shifted_chebyshev_polynomial_t_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_u_name[] = \"shifted_chebyshev_polynomial_u_forward\";\n\n void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_u_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_u_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_u_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_u_forward(x, n);\n });\n });\n#endif\n } // shifted_chebyshev_polynomial_u_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_u_name[] = \"shifted_chebyshev_polynomial_u_forward\";\n\n void shifted_chebyshev_polynomial_u_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_u_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_u_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_u_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_u_forward(x, n);\n });\n });\n#endif\n } // shifted_chebyshev_polynomial_u_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_v_name[] = \"shifted_chebyshev_polynomial_v_forward\";\n\nvoid shifted_chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_v_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_v_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_v_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_v_forward(x, n);\n });\n });\n#endif\n} // shifted_chebyshev_polynomial_v_kernel_cuda\n\n} // namespace (anonymous)\n\nREGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_v_name[] = \"shifted_chebyshev_polynomial_v_forward\";\n\nvoid shifted_chebyshev_polynomial_v_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_v_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_v_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_v_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_v_forward(x, n);\n });\n });\n#endif\n} // shifted_chebyshev_polynomial_v_kernel_hip\n\n} // namespace (anonymous)\n\nREGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_w_name[] = \"shifted_chebyshev_polynomial_w_forward\";\n\n void shifted_chebyshev_polynomial_w_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_w_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_w_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_w_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_w_forward(x, n);\n });\n });\n#endif\n } // shifted_chebyshev_polynomial_w_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(shifted_chebyshev_polynomial_w_stub, &shifted_chebyshev_polynomial_w_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_w_name[] = \"shifted_chebyshev_polynomial_w_forward\";\n\n void shifted_chebyshev_polynomial_w_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_w_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, shifted_chebyshev_polynomial_w_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"shifted_chebyshev_polynomial_w_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return shifted_chebyshev_polynomial_w_forward(x, n);\n });\n });\n#endif\n } // shifted_chebyshev_polynomial_w_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(shifted_chebyshev_polynomial_w_stub, &shifted_chebyshev_polynomial_w_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nstd::vector infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {\n int64_t ndim = self.dim();\n // sort the strides in descending order according to its value,\n // keeping dim the last.\n std::vector strides = self.strides().vec();\n strides[dim] = -1;\n std::vector original_dim(ndim);\n for (int64_t i = 0; i < ndim; i++) {\n original_dim[i] = i;\n }\n thrust::stable_sort_by_key(\n thrust::host, strides.data(), strides.data() + ndim, original_dim.data(),\n thrust::greater()\n );\n // generate contiguous strides on permuted dims\n std::vector new_strides(ndim);\n std::vector new_strides_unsort(ndim);\n int64_t cumprod = 1;\n for (int64_t i = 0; i < ndim; i++) {\n new_strides[ndim - 1 - i] = cumprod;\n cumprod *= self.sizes()[original_dim[ndim - 1 - i]];\n }\n // unsort new strides\n for (int64_t i = 0; i < ndim; i++) {\n new_strides_unsort[original_dim[i]] = new_strides[i];\n }\n return new_strides_unsort;\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nstd::vector infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {\n int64_t ndim = self.dim();\n // sort the strides in descending order according to its value,\n // keeping dim the last.\n std::vector strides = self.strides().vec();\n strides[dim] = -1;\n std::vector original_dim(ndim);\n for (int64_t i = 0; i < ndim; i++) {\n original_dim[i] = i;\n }\n thrust::stable_sort_by_key(\n thrust::host, strides.data(), strides.data() + ndim, original_dim.data(),\n thrust::greater()\n );\n // generate contiguous strides on permuted dims\n std::vector new_strides(ndim);\n std::vector new_strides_unsort(ndim);\n int64_t cumprod = 1;\n for (int64_t i = 0; i < ndim; i++) {\n new_strides[ndim - 1 - i] = cumprod;\n cumprod *= self.sizes()[original_dim[ndim - 1 - i]];\n }\n // unsort new strides\n for (int64_t i = 0; i < ndim; i++) {\n new_strides_unsort[original_dim[i]] = new_strides[i];\n }\n return new_strides_unsort;\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\nnamespace at::native {\n// sparse, sparse, sparse, dense, real, real -> sparse\nTensor& _sspaddmm_out_only_sparse_cuda(const Tensor& self,\n const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {\n AT_ERROR(\"tensor.sspaddmm(...) can only be called on sparse tensors\");\n}\nTensor& _sspaddmm_out_cuda(const Tensor& self,\n const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {\n AT_ERROR(\"NYI: CUDA sspaddmm is not implemented\");\n}\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\nnamespace at::native {\n// sparse, sparse, sparse, dense, real, real -> sparse\nTensor& _sspaddmm_out_only_sparse_hip(const Tensor& self,\n const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {\n AT_ERROR(\"tensor.sspaddmm(...) 
can only be called on sparse tensors\");\n}\nTensor& _sspaddmm_out_hip(const Tensor& self,\n const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {\n AT_ERROR(\"NYI: HIP sspaddmm is not implemented\");\n}\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char spherical_bessel_j0_name[] = \"spherical_bessel_j0_forward\";\n\n void spherical_bessel_j0_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"spherical_bessel_j0_cuda\", [&]() {\n jitted_gpu_kernel(iterator, spherical_bessel_j0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"spherical_bessel_j0_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return spherical_bessel_j0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char spherical_bessel_j0_name[] = \"spherical_bessel_j0_forward\";\n\n void spherical_bessel_j0_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"spherical_bessel_j0_hip\", [&]() {\n jitted_gpu_kernel(iterator, spherical_bessel_j0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"spherical_bessel_j0_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return spherical_bessel_j0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n\n#include \n#include \n#include \n\nnamespace at {\nnamespace cuda {\nnamespace detail {\n\nTORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);\nusing at::native::canUse32BitIndexMath;\n\ntemplate \nTensorInfo\ngetTensorInfo(const at::TensorBase &t) {\n IndexType sz[MAX_TENSORINFO_DIMS];\n IndexType st[MAX_TENSORINFO_DIMS];\n\n int dims = t.dim();\n for (int i = 0; i < dims; ++i) {\n sz[i] = t.size(i);\n st[i] = t.stride(i);\n }\n\n return TensorInfo(\n t.data_ptr(), dims, sz, st);\n}\n\n} // detail\n} // cuda\n} // at\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#pragma once\n\n#include \n#include \n#include \n\nnamespace at {\nnamespace hip {\nnamespace detail {\n\nTORCH_HIP_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);\nusing at::native::canUse32BitIndexMath;\n\ntemplate \nTensorInfo\ngetTensorInfo(const at::TensorBase &t) {\n IndexType sz[MAX_TENSORINFO_DIMS];\n IndexType st[MAX_TENSORINFO_DIMS];\n\n int dims = t.dim();\n for (int i = 0; i < dims; ++i) {\n sz[i] = t.size(i);\n st[i] = t.stride(i);\n }\n\n return TensorInfo(\n t.data_ptr(), dims, sz, st);\n}\n\n} // detail\n} // cuda\n} // at\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid nextafter_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.common_dtype(), \"nextafter_cuda\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return std::nextafter(a, b);\n });\n });\n}\n\nvoid heaviside_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), \"heaviside_cuda\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return a == 0 ? b : static_cast(a > 0);\n });\n });\n}\n\nREGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_cuda);\nREGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid nextafter_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.common_dtype(), \"nextafter_hip\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return std::nextafter(a, b);\n });\n });\n}\n\nvoid heaviside_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), \"heaviside_hip\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return a == 0 ? 
b : static_cast(a > 0);\n });\n });\n}\n\nREGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_hip);\nREGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char acosh_name[] = \"acosh_impl\";\n#endif\n\nvoid acosh_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if(at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto acosh_string = jiterator_stringify(\n template \n T acosh_impl(T a) {\n return std::acosh(a);\n }\n );\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"acosh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/ acosh_name,\n /*return_dtype=*/ scalar_t,\n /*common_dtype=*/ scalar_t,\n /*arity=*/ 1>(iter, acosh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"acosh_name\", [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::acosh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half, ScalarType::BFloat16,\n common_dtype, \"acosh_cuda\",\n [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::acosh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char acosh_name[] = \"acosh_impl\";\n#endif\n\nvoid acosh_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if(at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto acosh_string = jiterator_stringify(\n template \n T acosh_impl(T a) {\n return std::acosh(a);\n }\n );\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"acosh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/ acosh_name,\n /*return_dtype=*/ scalar_t,\n /*common_dtype=*/ scalar_t,\n /*arity=*/ 1>(iter, acosh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"acosh_name\", [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::acosh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half, ScalarType::BFloat16,\n common_dtype, \"acosh_hip\",\n [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::acosh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(acosh_stub, &acosh_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char acos_name[] = \"acos_impl\";\n#endif\nvoid acos_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const 
auto acos_string = jiterator_stringify(\n template T acos_impl(T a) { return std::acos(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"acos_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/acos_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, acos_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"acos_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::acos(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"acos_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::acos(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char acos_name[] = \"acos_impl\";\n#endif\nvoid acos_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto acos_string = jiterator_stringify(\n template T acos_impl(T a) { return std::acos(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"acos_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/acos_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, acos_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"acos_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::acos(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"acos_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::acos(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(acos_stub, &acos_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char asinh_name[] = \"asinh_impl\";\n#endif\n\nvoid asinh_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto asinh_string = jiterator_stringify(\n template T asinh_impl(T a) { return std::asinh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asinh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/asinh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, asinh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asinh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::asinh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"asinh_cuda\",\n [&]() {\n gpu_kernel(iter, [] 
GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::asinh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char asinh_name[] = \"asinh_impl\";\n#endif\n\nvoid asinh_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto asinh_string = jiterator_stringify(\n template T asinh_impl(T a) { return std::asinh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asinh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/asinh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, asinh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asinh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::asinh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"asinh_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::asinh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(asinh_stub, &asinh_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char asin_name[] = \"asin_impl\";\n#endif\n\nvoid asin_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto asin_string = jiterator_stringify(\n template T asin_impl(T a) { return std::asin(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asin_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/asin_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, asin_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asin_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::asin(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, common_dtype, \"asin_cuda\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::asin(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char asin_name[] = \"asin_impl\";\n#endif\n\nvoid asin_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto asin_string = jiterator_stringify(\n template T asin_impl(T a) { return std::asin(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asin_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/asin_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, asin_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"asin_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::asin(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, common_dtype, \"asin_hip\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::asin(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(asin_stub, &asin_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char atanh_name[] = \"atanh_impl\";\n#endif\n\nvoid atanh_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto atanh_string = jiterator_stringify(\n template T atanh_impl(T a) { return std::atanh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"atanh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/atanh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, atanh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"atanh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::atanh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"atanh_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::atanh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char atanh_name[] = \"atanh_impl\";\n#endif\n\nvoid atanh_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto atanh_string = jiterator_stringify(\n template T atanh_impl(T a) { return std::atanh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"atanh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/atanh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, atanh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"atanh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::atanh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"atanh_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::atanh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(atanh_stub, &atanh_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char atan_name[] = \"atan_impl\";\n#endif\n\nvoid atan_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto atan_string = jiterator_stringify(\n template \n T atan_impl(T a) {\n return std::atan(a);\n }\n );\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"atan_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/ atan_name,\n /*return_dtype=*/ scalar_t,\n /*common_dtype=*/ scalar_t,\n /*arity=*/ 1>(iter, atan_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"atan_name\", [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::atan(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half, ScalarType::BFloat16,\n common_dtype, \"atan_cuda\",\n [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::atan(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char atan_name[] = \"atan_impl\";\n#endif\n\nvoid atan_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto atan_string = jiterator_stringify(\n template \n T atan_impl(T a) {\n return std::atan(a);\n }\n );\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"atan_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/ atan_name,\n /*return_dtype=*/ scalar_t,\n /*common_dtype=*/ scalar_t,\n /*arity=*/ 1>(iter, atan_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, \"atan_name\", [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::atan(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half, ScalarType::BFloat16,\n common_dtype, \"atan_hip\",\n [&]() {\n gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::atan(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(atan_stub, &atan_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char cosh_name[] = \"cosh_impl\";\n#endif\n\nvoid cosh_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto cosh_string = jiterator_stringify(\n template T cosh_impl(T a) { return std::cosh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cosh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/cosh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, cosh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cosh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::cosh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"cosh_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::cosh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char cosh_name[] = \"cosh_impl\";\n#endif\n\nvoid cosh_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto cosh_string = jiterator_stringify(\n template T cosh_impl(T a) { return std::cosh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cosh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/cosh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, cosh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cosh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::cosh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"cosh_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::cosh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(cosh_stub, &cosh_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char cos_name[] = \"cos_impl\";\n#endif // AT_USE_JITERATOR()\n\nvoid cos_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto cos_string = jiterator_stringify(\n template T cos_impl(T a) { return std::cos(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cos_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/cos_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, cos_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cos_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::cos(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"cos_cuda\",\n [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cos(a); });\n });\n }\n}\n\nREGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char cos_name[] = \"cos_impl\";\n#endif // AT_USE_JITERATOR()\n\nvoid cos_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto cos_string = jiterator_stringify(\n template T cos_impl(T a) { return std::cos(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cos_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/cos_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, cos_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"cos_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::cos(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"cos_hip\",\n [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cos(a); });\n });\n }\n}\n\nREGISTER_DISPATCH(cos_stub, &cos_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char sinh_name[] = \"sinh_impl\";\n#endif\n\nvoid sinh_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto sinh_string = jiterator_stringify(\n template T sinh_impl(T a) { return std::sinh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sinh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/sinh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, sinh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sinh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::sinh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"sinh_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::sinh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char sinh_name[] = \"sinh_impl\";\n#endif\n\nvoid sinh_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto sinh_string = jiterator_stringify(\n template T sinh_impl(T a) { return std::sinh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sinh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/sinh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, sinh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sinh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::sinh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"sinh_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::sinh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(sinh_stub, &sinh_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n#include \n#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)\n#include \n#endif\nnamespace at {\nnamespace cuda {\nnamespace detail {\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct DivMod {\n Value div, mod;\n C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }\n};\n\n\ntemplate \nstruct IntDivider {\n IntDivider() = default;\n IntDivider(Value d) : divisor(d) { }\n C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }\n C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }\n C10_HOST_DEVICE inline DivMod divmod(Value n) const {\n return DivMod(n / divisor, n % divisor);\n }\n Value divisor;\n};\n\ntemplate <>\nstruct IntDivider {\n static_assert(sizeof(unsigned int) == 4, \"Assumes 32-bit unsigned int.\");\n IntDivider() = default;\n IntDivider(unsigned int d) : divisor(d) {\n assert(divisor >= 1 && divisor <= INT32_MAX);\n \n for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;\n uint64_t one = 1;\n uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;\n m1 = magic;\n assert(m1 > 0 && m1 == magic); \n }\n C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {\n#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)\n \n \n unsigned int t = __umulhi(n, m1);\n return (t + n) >> shift;\n#else\n \n uint64_t t = ((uint64_t) n * m1) >> 32;\n return (t + n) >> shift;\n#endif\n }\n C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {\n return n - div(n) * divisor;\n }\n C10_HOST_DEVICE inline DivMod divmod(unsigned int n) const {\n unsigned int q = div(n);\n return DivMod(q, n - q * divisor);\n }\n unsigned int divisor; \n unsigned int m1; \n unsigned int shift; \n};\n}}} \n\n###", "hip": " \n#pragma once\n#include \n#if defined(__HIP_ARCH__) || defined(__HIP_DEVICE_COMPILE__)\n#include \n#endif\nnamespace at {\nnamespace hip {\nnamespace detail {\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct DivMod {\n Value div, mod;\n C10_HOST_DEVICE DivMod(Value div, Value mod) : 
div(div), mod(mod) { }\n};\n\n\ntemplate \nstruct IntDivider {\n IntDivider() = default;\n IntDivider(Value d) : divisor(d) { }\n C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }\n C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }\n C10_HOST_DEVICE inline DivMod divmod(Value n) const {\n return DivMod(n / divisor, n % divisor);\n }\n Value divisor;\n};\n\ntemplate <>\nstruct IntDivider {\n static_assert(sizeof(unsigned int) == 4, \"Assumes 32-bit unsigned int.\");\n IntDivider() = default;\n IntDivider(unsigned int d) : divisor(d) {\n assert(divisor >= 1 && divisor <= INT32_MAX);\n \n for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;\n uint64_t one = 1;\n uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;\n m1 = magic;\n assert(m1 > 0 && m1 == magic); \n }\n C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {\n#if defined(__HIP_ARCH__) || defined(__HIP_DEVICE_COMPILE__)\n \n \n unsigned int t = __umulhi(n, m1);\n return (t + n) >> shift;\n#else\n \n uint64_t t = ((uint64_t) n * m1) >> 32;\n return (t + n) >> shift;\n#endif\n }\n C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {\n return n - div(n) * divisor;\n }\n C10_HOST_DEVICE inline DivMod divmod(unsigned int n) const {\n unsigned int q = div(n);\n return DivMod(q, n - q * divisor);\n }\n unsigned int divisor; \n unsigned int m1; \n unsigned int shift; \n};\n}}} ###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char sin_name[] = \"sin_impl\";\n#endif\n\nvoid sin_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto sin_string = jiterator_stringify(\n template T sin_impl(T a) { return std::sin(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sin_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/sin_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, sin_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sin_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::sin(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"sin_cuda\",\n [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sin(a); });\n });\n }\n}\n\nREGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char sin_name[] = \"sin_impl\";\n#endif\n\nvoid sin_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n#if AT_USE_JITERATOR()\n static const auto sin_string = jiterator_stringify(\n template T sin_impl(T a) { return std::sin(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sin_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/sin_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, sin_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"sin_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::sin(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"sin_hip\",\n [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sin(a); });\n });\n }\n}\n\nREGISTER_DISPATCH(sin_stub, &sin_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = \"tanh_impl\";\n#endif\n\nvoid tanh_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto tanh_string = jiterator_stringify(\n template T tanh_impl(T a) { return std::tanh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tanh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/tanh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, tanh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tanh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::tanh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"tanh_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::tanh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char tanh_name[] = \"tanh_impl\";\n#endif\n\nvoid tanh_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto tanh_string = jiterator_stringify(\n template T tanh_impl(T a) { return std::tanh(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tanh_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/tanh_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, tanh_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tanh_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::tanh(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"tanh_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return ::tanh(a);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(tanh_stub, &tanh_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = \"tan_impl\";\n#endif\n\nvoid tan_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto tan_string = jiterator_stringify(\n template T tan_impl(T a) { return std::tan(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tan_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/tan_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, tan_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tan_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::tan(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"tan_cuda\",\n [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });\n });\n }\n}\n\nREGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n#if 0 && AT_USE_JITERATOR()\nCONSTEXPR_EXCEPT_WIN_HIP char tan_name[] = \"tan_impl\";\n#endif\n\nvoid tan_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (at::isComplexType(common_dtype)) {\n // Disabled due to accuracy issues\n#if 0 && AT_USE_JITERATOR()\n static const auto tan_string = jiterator_stringify(\n template T tan_impl(T a) { return std::tan(a); });\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tan_name\", [&]() {\n jitted_gpu_kernel<\n /*name=*/tan_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, tan_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(\n kComplexHalf, common_dtype, \"tan_name\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {\n using opmath_t = at::opmath_type;\n return ::tan(static_cast(a));\n });\n });\n#endif\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::Half,\n ScalarType::BFloat16,\n common_dtype,\n \"tan_hip\",\n [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });\n });\n }\n}\n\nREGISTER_DISPATCH(tan_stub, &tan_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \n\nnamespace at {\nnamespace native {\nnamespace internal {\n\ntemplate \nstd::tuple unique_cuda_template(\n const Tensor& self,\n const bool consecutive,\n const bool return_inverse,\n const bool return_counts);\n\n} // namespace internal\n} // namespace at\n} // namespace native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\nnamespace at {\nnamespace native {\nnamespace internal {\n\ntemplate \nstd::tuple unique_hip_template(\n const Tensor& self,\n const bool consecutive,\n const bool return_inverse,\n const bool return_counts);\n\n} // namespace internal\n} // namespace at\n} // namespace native\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\nnamespace at::native {\n\nnamespace {\n\ntemplate \nstruct CUDAKernelLauncher {\n static void launch(TensorIteratorBase& iter, const func_t& f) {\n gpu_kernel(iter, f);\n }\n};\n\n}\n\nvoid _validate_compressed_sparse_indices_cuda(\n const bool is_crow,\n const Tensor& cidx,\n const Tensor& idx,\n const int64_t cdim,\n const int64_t dim,\n const int64_t nnz) {\n validate_compressed_sparse_indices_kernel(\n is_crow, cidx, idx, cdim, dim, nnz);\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\nnamespace at::native {\n\nnamespace {\n\ntemplate \nstruct HIPKernelLauncher {\n static void launch(TensorIteratorBase& iter, const func_t& f) {\n gpu_kernel(iter, f);\n }\n};\n\n}\n\nvoid _validate_compressed_sparse_indices_hip(\n const bool is_crow,\n const Tensor& cidx,\n const Tensor& idx,\n const int64_t cdim,\n const int64_t dim,\n const int64_t nnz) {\n validate_compressed_sparse_indices_kernel(\n is_crow, cidx, idx, cdim, dim, nnz);\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\n/*\n * This function is derived from the implementation of the zeta function in the Cephes Math Library.\n * See note [3-Clause BSD License for the Cephes Math Library].\n */\n// See note [Jiterator]\nCONSTEXPR_EXCEPT_WIN_CUDA char zeta_name[] = \"zeta\";\nvoid zeta_kernel_cuda(TensorIteratorBase& iter) {\n #if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), \"zeta_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iter, zeta_string);\n });\n #else\n AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), \"zeta_cuda\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {\n return zeta(x, q);\n });\n });\n #endif //jiterator\n}\n\n} // namespace (anonymous)\n\nREGISTER_DISPATCH(zeta_stub, &zeta_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\n/*\n * This function is derived from the implementation of the zeta function in the Cephes Math Library.\n * See note [3-Clause BSD License for the Cephes Math Library].\n */\n// See note [Jiterator]\nCONSTEXPR_EXCEPT_WIN_HIP char zeta_name[] = \"zeta\";\nvoid zeta_kernel_hip(TensorIteratorBase& iter) {\n #if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), \"zeta_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iter, zeta_string);\n });\n #else\n AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), \"zeta_hip\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {\n return zeta(x, q);\n });\n });\n #endif //jiterator\n}\n\n} // namespace (anonymous)\n\nREGISTER_DISPATCH(zeta_stub, &zeta_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \n#include \n#include \n\nnamespace at {\nnamespace native {\n\nTensor& relu_quantized_cuda_(Tensor& self) {\n const auto zero_point = self.q_zero_point();\n AT_DISPATCH_QINT_TYPES(\n self.scalar_type(), \"qrelu_cuda\", [&]() {\n auto iter = TensorIterator::unary_op(self, self);\n gpu_kernel(iter, [zero_point] GPU_LAMBDA(scalar_t value) -> scalar_t {\n return scalar_t(std::max(value.val_, zero_point));\n });\n });\n return self;\n}\n\n} // namespace at::native\n} // namespace at\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \n#include \n#include \n\nnamespace at {\nnamespace native {\n\nTensor& relu_quantized_hip_(Tensor& self) {\n const auto zero_point = self.q_zero_point();\n AT_DISPATCH_QINT_TYPES(\n self.scalar_type(), \"qrelu_hip\", [&]() {\n auto iter = TensorIterator::unary_op(self, self);\n gpu_kernel(iter, [zero_point] GPU_LAMBDA(scalar_t value) -> scalar_t {\n return scalar_t(std::max(value.val_, zero_point));\n });\n });\n return self;\n}\n\n} // namespace at::native\n} // namespace at\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#include \n#else\n#include \n#include \n#endif\n\nnamespace at {\nnamespace native {\n\nTensor int_repr_quantized_cuda(const Tensor& self) {\n Tensor dst;\n AT_DISPATCH_QINT_TYPES(self.scalar_type(), \"int_repr_quantized_cuda\", [&]() {\n dst = at::empty(\n self.sizes(),\n self.options().dtype(UNDERLYING_TYPE),\n self.suggest_memory_format());\n auto iter = TensorIteratorConfig()\n .check_all_same_dtype(false)\n .add_output(dst)\n .add_input(self)\n .build();\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t value) -> underlying_t {\n return value.val_;\n });\n });\n return dst;\n}\n\n} // namespace native\n} // namespace at\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#include \n#else\n#include \n#include \n#endif\n\nnamespace at {\nnamespace native {\n\nTensor int_repr_quantized_hip(const Tensor& self) {\n Tensor dst;\n AT_DISPATCH_QINT_TYPES(self.scalar_type(), \"int_repr_quantized_hip\", [&]() {\n dst = at::empty(\n self.sizes(),\n self.options().dtype(UNDERLYING_TYPE),\n self.suggest_memory_format());\n auto iter = TensorIteratorConfig()\n .check_all_same_dtype(false)\n .add_output(dst)\n .add_input(self)\n .build();\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t value) -> underlying_t {\n return value.val_;\n });\n });\n return dst;\n}\n\n} // namespace native\n} // namespace at\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#include \n#else\n#include \n#include \n#include \n#include \n#include \n#endif\n\nnamespace at {\nnamespace native {\n\nvoid assign_quantized_tensor_cuda(\n const Tensor& self, Tensor& dst) {\n AT_DISPATCH_QINT_TYPES(\n dst.scalar_type(), \"assign_quantized_tensor_cuda\", [&]() {\n auto iter = TensorIteratorConfig()\n .check_all_same_dtype(false)\n .add_output(dst)\n .add_input(self)\n .build();\n gpu_kernel(iter, [] GPU_LAMBDA(underlying_t value) -> scalar_t {\n return scalar_t(value);\n });\n });\n}\n\nTensor make_per_tensor_quantized_tensor_cuda(\n const Tensor& self,\n double scale,\n int64_t zero_point) {\n Tensor dst = at::_empty_affine_quantized(\n self.sizes(),\n self.options().dtype(toQIntType(self.scalar_type())),\n scale,\n zero_point);\n assign_quantized_tensor_cuda(self, dst);\n return dst;\n}\n\nTensor make_per_channel_quantized_tensor_cuda(\n const Tensor& self,\n const Tensor& scales,\n const Tensor& zero_points,\n int64_t axis) {\n Tensor dst = at::_empty_per_channel_affine_quantized(\n self.sizes(),\n scales,\n zero_points,\n axis,\n self.options().dtype(toQIntType(self.scalar_type())));\n assign_quantized_tensor_cuda(self, dst);\n return dst;\n}\n\n} // namespace native\n} // namespace 
at\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#include \n#else\n#include \n#include \n#include \n#include \n#include \n#endif\n\nnamespace at {\nnamespace native {\n\nvoid assign_quantized_tensor_hip(\n const Tensor& self, Tensor& dst) {\n AT_DISPATCH_QINT_TYPES(\n dst.scalar_type(), \"assign_quantized_tensor_hip\", [&]() {\n auto iter = TensorIteratorConfig()\n .check_all_same_dtype(false)\n .add_output(dst)\n .add_input(self)\n .build();\n gpu_kernel(iter, [] GPU_LAMBDA(underlying_t value) -> scalar_t {\n return scalar_t(value);\n });\n });\n}\n\nTensor make_per_tensor_quantized_tensor_hip(\n const Tensor& self,\n double scale,\n int64_t zero_point) {\n Tensor dst = at::_empty_affine_quantized(\n self.sizes(),\n self.options().dtype(toQIntType(self.scalar_type())),\n scale,\n zero_point);\n assign_quantized_tensor_hip(self, dst);\n return dst;\n}\n\nTensor make_per_channel_quantized_tensor_hip(\n const Tensor& self,\n const Tensor& scales,\n const Tensor& zero_points,\n int64_t axis) {\n Tensor dst = at::_empty_per_channel_affine_quantized(\n self.sizes(),\n scales,\n zero_points,\n axis,\n self.options().dtype(toQIntType(self.scalar_type())));\n assign_quantized_tensor_hip(self, dst);\n return dst;\n}\n\n} // namespace native\n} // namespace at\n###" }, { "cuda": "\n#pragma once\n\n#include \n#include \n\nnamespace at {\nclass Tensor;\n}\nnamespace c10 {\nclass Scalar;\n}\n\nnamespace at { namespace native {\n\nvoid s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense);\n\n}} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n\n#include \n#include \n\nnamespace at {\nclass Tensor;\n}\nnamespace c10 {\nclass Scalar;\n}\n\nnamespace at { namespace native {\n\nvoid s_addmm_out_sparse_dense_hip_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense);\n\n}} // namespace at::native\n###" }, { "cuda": "\n// No \"#pragma once\" because this is a raw definition that can be copied by jit codegen.\n// Eager mode clients should not include this file directly, instead,\n// they should #include , which has a #pragma once.\n\n// Stores RNG state values. 
Passed as a kernel argument.\n// See Note [CUDA Graph-safe RNG states].\n//\n// The raw definition lives in its own file so jit codegen can easily copy it.\nnamespace at {\n\nstruct PhiloxCudaState {\n PhiloxCudaState() = default;\n // Called if graph capture is not underway\n PhiloxCudaState(uint64_t seed,\n uint64_t offset) {\n seed_.val = seed;\n offset_.val = offset;\n }\n // Called if graph capture is underway\n PhiloxCudaState(int64_t* seed,\n int64_t* offset_extragraph,\n uint32_t offset_intragraph) {\n seed_.ptr = seed;\n offset_.ptr = offset_extragraph;\n offset_intragraph_ = offset_intragraph;\n captured_ = true;\n }\n\n // Public members, directly accessible by at::cuda::philox::unpack.\n // If we made them private with getters/setters, the getters/setters\n // would have to be __device__, and we can't declare __device__ in ATen.\n union Payload {\n uint64_t val;\n int64_t* ptr;\n };\n\n Payload seed_;\n Payload offset_;\n uint32_t offset_intragraph_ = 0;\n bool captured_ = false;\n};\n\n} // namespace at\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n// No \"#pragma once\" because this is a raw definition that can be copied by jit codegen.\n// Eager mode clients should not include this file directly, instead,\n// they should #include , which has a #pragma once.\n\n// Stores RNG state values. Passed as a kernel argument.\n// See Note [HIP Graph-safe RNG states].\n//\n// The raw definition lives in its own file so jit codegen can easily copy it.\nnamespace at {\n\nstruct PhiloxHipState {\n PhiloxHipState() = default;\n // Called if graph capture is not underway\n PhiloxHipState(uint64_t seed,\n uint64_t offset) {\n seed_.val = seed;\n offset_.val = offset;\n }\n // Called if graph capture is underway\n PhiloxHipState(int64_t* seed,\n int64_t* offset_extragraph,\n uint32_t offset_intragraph) {\n seed_.ptr = seed;\n offset_.ptr = offset_extragraph;\n offset_intragraph_ = offset_intragraph;\n captured_ = true;\n }\n\n // Public members, directly accessible by at::cuda::philox::unpack.\n // If we made them private with getters/setters, the getters/setters\n // would have to be __device__, and we can't declare __device__ in ATen.\n union Payload {\n uint64_t val;\n int64_t* ptr;\n };\n\n Payload seed_;\n Payload offset_;\n uint32_t offset_intragraph_ = 0;\n bool captured_ = false;\n};\n\n} // namespace at\n###" }, { "cuda": "\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_bwd_hdim128(FMHA_dgrad_params &params, cudaStream_t stream, const bool configure) {\n FP16_SWITCH(params.is_bf16, ([&] {\n using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 8, 0x100u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n }));\n}\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_bwd_hdim128(FMHA_dgrad_params &params, hipStream_t stream, const bool configure) {\n FP16_SWITCH(params.is_bf16, ([&] {\n using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 8, 0x100u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n }));\n}###" }, { "cuda": "\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_bwd_hdim32(FMHA_dgrad_params &params, cudaStream_t stream, const bool configure) {\n FP16_SWITCH(params.is_bf16, ([&] {\n if (params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (params.seqlen_k >= 256) {\n using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n }\n }));\n}\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_bwd_hdim32(FMHA_dgrad_params &params, hipStream_t stream, const bool configure) {\n FP16_SWITCH(params.is_bf16, ([&] {\n if (params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (params.seqlen_k >= 256) {\n using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n }\n }));\n}###" }, { "cuda": "\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_bwd_hdim64(FMHA_dgrad_params &params, cudaStream_t stream, const bool configure) {\n FP16_SWITCH(params.is_bf16, ([&] {\n auto dprops = at::cuda::getCurrentDeviceProperties();\n if (params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (params.seqlen_k >= 256) {\n if ((dprops->major == 8 && dprops->minor == 0) ||\n (dprops->major == 9 && dprops->minor == 0)) {\n // Don't share smem for K & V, and don't keep V in registers\n // This speeds things up by 2-3% by avoiding register spills, but it\n // uses more shared memory, which is fine on A100 and H100 but not other\n // GPUs. For other GPUs, we keep V in registers.\n using Kernel_traits =\n FMHA_kernel_traits<256, 64, 16, 1, 8, 0x100u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (dprops->major == 8 && dprops->minor > 0) {\n using Kernel_traits =\n FMHA_kernel_traits<256, 64, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (dprops->major == 7 && dprops->minor == 5) {\n using Kernel_traits =\n FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n }\n }\n }));\n}\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_bwd_hdim64(FMHA_dgrad_params &params, hipStream_t stream, const bool configure) {\n FP16_SWITCH(params.is_bf16, ([&] {\n auto dprops = at::cuda::getCurrentDeviceProperties();\n if (params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (params.seqlen_k >= 256) {\n if ((dprops->major == 8 && dprops->minor == 0) ||\n (dprops->major == 9 && dprops->minor == 0)) {\n // Don't share smem for K & V, and don't keep V in registers\n // This speeds things up by 2-3% by avoiding register spills, but it\n // uses more shared memory, which is fine on A100 and H100 but not other\n // GPUs. For other GPUs, we keep V in registers.\n using Kernel_traits =\n FMHA_kernel_traits<256, 64, 16, 1, 8, 0x100u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (dprops->major == 8 && dprops->minor > 0) {\n using Kernel_traits =\n FMHA_kernel_traits<256, 64, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n } else if (dprops->major == 7 && dprops->minor == 5) {\n using Kernel_traits =\n FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;\n run_fmha_bwd_loop(params, stream, configure);\n }\n }\n }));\n}###" }, { "cuda": "\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_fwd_hdim128(Launch_params &launch_params) {\n FP16_SWITCH(launch_params.params.is_bf16, ([&] {\n using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n }));\n}\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_fwd_hdim128(Launch_params &launch_params) {\n FP16_SWITCH(launch_params.params.is_bf16, ([&] {\n using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n }));\n}###" }, { "cuda": "\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_fwd_hdim32(Launch_params &launch_params) {\n FP16_SWITCH(launch_params.params.is_bf16, ([&] {\n if (launch_params.params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n } else if (launch_params.params.seqlen_k >= 256) {\n using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n }\n }));\n}\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_fwd_hdim32(Launch_params &launch_params) {\n FP16_SWITCH(launch_params.params.is_bf16, ([&] {\n if (launch_params.params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n } else if (launch_params.params.seqlen_k >= 256) {\n using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n }\n }));\n}###" }, { "cuda": "\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_fwd_hdim64(Launch_params &launch_params) {\n FP16_SWITCH(launch_params.params.is_bf16, ([&] {\n if (launch_params.params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n } else if (launch_params.params.seqlen_k >= 256) {\n using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n }\n }));\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n// Copyright (c) 2022, Tri Dao.\n\n// Splitting the different head dimensions to different files to speed up compilation.\n\n#include \n\nvoid run_fmha_fwd_hdim64(Launch_params &launch_params) {\n FP16_SWITCH(launch_params.params.is_bf16, ([&] {\n if (launch_params.params.seqlen_k == 128) {\n using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n } else if (launch_params.params.seqlen_k >= 256) {\n using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u, elem_type>;\n run_fmha_fwd_loop(launch_params);\n }\n }));\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. 
See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. 
See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n#pragma once\n#include \nnamespace at {\nnamespace cuda {\nnamespace detail {\n#define MAX_TENSORINFO_DIMS 25\n\ntemplate \nstruct TensorInfo {\n TensorInfo();\n TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);\n \n \n \n void reduceDim(int dim);\n \n int collapseDims(const int excludeDim = -1);\n \n \n __host__ __device__ inline bool isContiguous() const {\n return (dims == 1 && strides[0] == 1);\n }\n T* data;\n IndexType sizes[MAX_TENSORINFO_DIMS];\n IndexType strides[MAX_TENSORINFO_DIMS];\n int dims;\n};\ntemplate \nTensorInfo::TensorInfo() {\n data = nullptr;\n dims = 0;\n}\ntemplate \nTensorInfo::TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]) {\n data = p;\n dims = dim;\n 
TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, \"CUDA Tensors cannot have more than 25 dimensions\");\n for (int i = 0; i < dim; ++i) {\n sizes[i] = sz[i];\n strides[i] = st[i];\n }\n}\ntemplate \nvoid\nTensorInfo::reduceDim(int dim) {\n TORCH_CHECK(dim < dims && dim >= 0, \"expected dim between 0 and dims - 1\");\n sizes[dim] = 1;\n}\ntemplate \nint\nTensorInfo::collapseDims(const int excludeDim) {\n auto result = at::collapse_dims(sizes, strides, dims, excludeDim);\n dims = std::get<1>(result);\n return std::get<0>(result);\n}\n\n\ntemplate \nstruct IndexToOffset {\n static __host__ __device__ IndexType get(\n IndexType linearId, const TensorInfo& info) {\n IndexType offset = 0;\n \n for (int i = Dims - 1; i > 0; --i) {\n IndexType curDimIndex = linearId % info.sizes[i];\n IndexType curDimOffset = curDimIndex * info.strides[i];\n offset += curDimOffset;\n linearId /= info.sizes[i];\n }\n return offset + linearId * info.strides[0];\n }\n};\n\ntemplate \nstruct IndexToOffset {\n static inline __host__ __device__ IndexType get(\n IndexType linearId, const TensorInfo& info) {\n IndexType offset = 0;\n for (int i = info.dims - 1; i > 0; --i) {\n IndexType curDimIndex = linearId % info.sizes[i];\n IndexType curDimOffset = curDimIndex * info.strides[i];\n offset += curDimOffset;\n linearId /= info.sizes[i];\n }\n return offset + linearId * info.strides[0];\n }\n};\n} \n} \n} \n\n###", "hip": " \n#pragma once\n#include \nnamespace at {\nnamespace hip {\nnamespace detail {\n#define MAX_TENSORINFO_DIMS 25\n\ntemplate \nstruct TensorInfo {\n TensorInfo();\n TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);\n \n \n \n void reduceDim(int dim);\n \n int collapseDims(const int excludeDim = -1);\n \n \n __host__ __device__ inline bool isContiguous() const {\n return (dims == 1 && strides[0] == 1);\n }\n T* data;\n IndexType sizes[MAX_TENSORINFO_DIMS];\n IndexType strides[MAX_TENSORINFO_DIMS];\n int dims;\n};\ntemplate \nTensorInfo::TensorInfo() {\n data = nullptr;\n dims = 0;\n}\ntemplate \nTensorInfo::TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]) {\n data = p;\n dims = dim;\n TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, \"HIP Tensors cannot have more than 25 dimensions\");\n for (int i = 0; i < dim; ++i) {\n sizes[i] = sz[i];\n strides[i] = st[i];\n }\n}\ntemplate \nvoid\nTensorInfo::reduceDim(int dim) {\n TORCH_CHECK(dim < dims && dim >= 0, \"expected dim between 0 and dims - 1\");\n sizes[dim] = 1;\n}\ntemplate \nint\nTensorInfo::collapseDims(const int excludeDim) {\n auto result = at::collapse_dims(sizes, strides, dims, excludeDim);\n dims = std::get<1>(result);\n return std::get<0>(result);\n}\n\n\ntemplate \nstruct IndexToOffset {\n static __host__ __device__ IndexType get(\n IndexType linearId, const TensorInfo& info) {\n IndexType offset = 0;\n \n for (int i = Dims - 1; i > 0; --i) {\n IndexType curDimIndex = linearId % info.sizes[i];\n IndexType curDimOffset = curDimIndex * info.strides[i];\n offset += curDimOffset;\n linearId /= info.sizes[i];\n }\n return offset + linearId * info.strides[0];\n }\n};\n\ntemplate \nstruct IndexToOffset {\n static inline __host__ __device__ IndexType get(\n IndexType linearId, const TensorInfo& info) {\n IndexType offset = 0;\n for (int i = info.dims - 1; i > 0; --i) {\n IndexType curDimIndex = linearId % info.sizes[i];\n IndexType curDimOffset = curDimIndex * info.strides[i];\n offset += curDimOffset;\n linearId /= info.sizes[i];\n }\n return offset + linearId * 
info.strides[0];\n }\n};\n} \n} \n} ###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. 
and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. 
See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. 
See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 800\n#if __CUDA_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/*\n * Copyright (c) Meta Platforms, Inc. and affiliates.\n * All rights reserved.\n *\n * This source code is licensed under the BSD-style license found in the\n * LICENSE file in the root directory of this source tree.\n */\n// This file is auto-generated. See \"generate_kernels.py\"\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads,\n AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 800\n#if __HIP_ARCH__ < 1000\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\\n\",\n int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n###" }, { "cuda": "\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 500\n#if __CUDA_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 700\n#if __CUDA_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 750\n#if __CUDA_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename 
AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 500\n#if __HIP_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 700\n#if __HIP_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 750\n#if __HIP_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}###" }, { "cuda": "\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 500\n#if __CUDA_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 700\n#if __CUDA_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 750\n#if __CUDA_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50(typename 
AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 500\n#if __HIP_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 700\n#if __HIP_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 750\n#if __HIP_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}###" }, { "cuda": "\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 500\n#if __CUDA_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 700\n#if __CUDA_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 750\n#if __CUDA_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_sm50(typename AttentionBackwardKernel::Params p) 
{\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 500\n#if __HIP_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 700\n#if __HIP_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 750\n#if __HIP_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}###" }, { "cuda": "\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 500\n#if __CUDA_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 700\n#if __CUDA_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 750\n#if __CUDA_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if 
__HIP_ARCH__ >= 500\n#if __HIP_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 700\n#if __HIP_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 750\n#if __HIP_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}###" }, { "cuda": "\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 500\n#if __CUDA_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 700\n#if __CUDA_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 750\n#if __CUDA_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 500\n#if __HIP_ARCH__ < 700\n if 
(!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 700\n#if __HIP_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 750\n#if __HIP_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}###" }, { "cuda": "\n// No \"#pragma once\" because this is a raw definition that can be copied by jit codegen.\n// Eager mode clients should not include this file directly, instead,\n// they should #include , which has a #pragma once.\n\nnamespace at {\nnamespace cuda {\nnamespace philox {\n\n// In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether\n// that instance was created with graph capture underway or not.\n// See Note [CUDA Graph-safe RNG states].\n//\n// We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen.\n// Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable.\n// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.\n//\n// The raw definition lives in its own file so jit codegen can easily copy it.\n__device__ __forceinline__ std::tuple\nunpack(at::PhiloxCudaState arg) {\n if (arg.captured_) {\n // static_cast avoids \"warning: invalid narrowing conversion from \"long\" to \"unsigned long\".\n // *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.\n // For most threads' reads it will hit in cache, so it shouldn't hurt performance.\n return std::make_tuple(static_cast(*arg.seed_.ptr), static_cast(*(arg.offset_.ptr) + arg.offset_intragraph_));\n } else {\n return std::make_tuple(arg.seed_.val, arg.offset_.val);\n }\n}\n\n} // namespace philox\n} // namespace cuda\n} // namespace at\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n// No \"#pragma once\" because this is a raw definition that can be copied by jit codegen.\n// Eager mode clients should not include this file directly, instead,\n// they should #include , which has a #pragma once.\n\nnamespace at {\nnamespace hip {\nnamespace philox {\n\n// In-kernel call to retrieve philox seed and offset from a PhiloxHipState instance whether\n// that instance was created with graph capture underway or not.\n// See Note [HIP Graph-safe RNG states].\n//\n// We can't write a __device__ function in HIPGeneratorImpl.h, because it's in ATen.\n// Also, whatever call unpacks PhiloxHipState in consumer kernels must be inlineable.\n// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.\n//\n// The raw definition lives in its own file so jit codegen can easily copy it.\n__device__ __forceinline__ std::tuple\nunpack(at::PhiloxHipState arg) {\n if (arg.captured_) {\n // static_cast avoids \"warning: invalid narrowing conversion from \"long\" to \"unsigned long\".\n // *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.\n // For most threads' reads it will hit in cache, so it shouldn't hurt performance.\n return std::make_tuple(static_cast(*arg.seed_.ptr), static_cast(*(arg.offset_.ptr) + arg.offset_intragraph_));\n } else {\n return std::make_tuple(arg.seed_.val, arg.offset_.val);\n }\n}\n\n} // namespace philox\n} // namespace hip\n} // namespace at\n###" }, { "cuda": "\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 500\n#if __CUDA_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 700\n#if __CUDA_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 750\n#if __CUDA_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel::Params 
p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 500\n#if __HIP_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 700\n#if __HIP_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 750\n#if __HIP_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}###" }, { "cuda": "\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k65536_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 500\n#if __CUDA_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k65536_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 700\n#if __CUDA_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k65536_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __CUDA_ARCH__\n#if __CUDA_ARCH__ >= 750\n#if __CUDA_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__CUDA_ARCH__ + 0) / 10);\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n\n#include \n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k65536_sm50(typename AttentionBackwardKernel::Params p) {\n#ifdef 
__HIP_ARCH__\n#if __HIP_ARCH__ >= 500\n#if __HIP_ARCH__ < 700\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k65536_sm70(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 700\n#if __HIP_ARCH__ < 750\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}\n__global__ void __launch_bounds__(\n AttentionBackwardKernel::kNumThreads, AttentionBackwardKernel::kMinBlocksPerSm)\nfmha_cutlassB_f32_notaligned_64x64_k65536_sm75(typename AttentionBackwardKernel::Params p) {\n#ifdef __HIP_ARCH__\n#if __HIP_ARCH__ >= 750\n#if __HIP_ARCH__ < 800\n if (!p.advance_to_block()) {\n return;\n }\n AttentionBackwardKernel::attention_kernel(p);\n return;\n#endif\n#endif\n printf(\n \"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm75` is for sm75-sm80, but was built for sm%d\\n\", int(__HIP_ARCH__ + 0) / 10);\n#endif\n}###" }, { "cuda": "\n#include \n#include \n\nint safeDeviceCount() {\n int count;\n cudaError_t err = cudaGetDeviceCount(&count);\n if (err == cudaErrorInsufficientDriver || err == cudaErrorNoDevice) {\n return 0;\n }\n return count;\n}\n\n#define SKIP_IF_NO_GPU() \\\n do { \\\n if (safeDeviceCount() == 0) { \\\n return; \\\n } \\\n } while(0)\n\n#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)\n#define C10_DEFINE_TEST(a, b) \\\n__global__ void CUDA##a##b(); \\\nTEST(a##Device, b) { \\\n SKIP_IF_NO_GPU(); \\\n cudaDeviceSynchronize(); \\\n CUDA##a##b<<<1, 1>>>(); \\\n C10_CUDA_KERNEL_LAUNCH_CHECK(); \\\n cudaDeviceSynchronize(); \\\n ASSERT_EQ(cudaGetLastError(), cudaSuccess); \\\n} \\\n__global__ void CUDA##a##b()\n#include \n\n\n#undef C10_DEFINE_TEST\n#undef C10_ASSERT_NEAR\n#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)\n#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)\n#include \n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n#include \n\nint safeDeviceCount() {\n int count;\n hipError_t err = hipGetDeviceCount(&count);\n if (err == hipErrorInsufficientDriver || err == hipErrorNoDevice) {\n return 0;\n }\n return count;\n}\n\n#define SKIP_IF_NO_GPU() \\\n do { \\\n if (safeDeviceCount() == 0) { \\\n return; \\\n } \\\n } while(0)\n\n#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)\n#define C10_DEFINE_TEST(a, b) \\\n__global__ void HIP##a##b(); \\\nTEST(a##Device, b) { \\\n SKIP_IF_NO_GPU(); \\\n hipDeviceSynchronize(); \\\n hipLaunchKernelGGL(( HIP##a##b), dim3(1), dim3(1), 0, 0, ); \\\n C10_HIP_KERNEL_LAUNCH_CHECK(); \\\n hipDeviceSynchronize(); \\\n ASSERT_EQ(hipGetLastError(), hipSuccess); \\\n} \\\n__global__ void HIP##a##b()\n#include \n\n\n#undef C10_DEFINE_TEST\n#undef C10_ASSERT_NEAR\n#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)\n#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)\n#include \n###" }, { "cuda": "\n#include \n\n#include \n#include \n#include \n\n#include \n\nusing namespace at;\n\n// optional in cuda files\nTEST(OptionalTest, OptionalTestCUDA) {\n if (!at::cuda::is_available()) return;\n c10::optional trivially_destructible;\n c10::optional> non_trivially_destructible;\n ASSERT_FALSE(trivially_destructible.has_value());\n ASSERT_FALSE(non_trivially_destructible.has_value());\n\n trivially_destructible = {5};\n non_trivially_destructible = std::vector{5, 10};\n ASSERT_TRUE(trivially_destructible.has_value());\n ASSERT_TRUE(non_trivially_destructible.has_value());\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \n#include \n#include \n\n#include \n\nusing namespace at;\n\n// optional in cuda files\nTEST(OptionalTest, OptionalTestHIP) {\n if (!at::cuda::is_available()) return;\n c10::optional trivially_destructible;\n c10::optional> non_trivially_destructible;\n ASSERT_FALSE(trivially_destructible.has_value());\n ASSERT_FALSE(non_trivially_destructible.has_value());\n\n trivially_destructible = {5};\n non_trivially_destructible = std::vector{5, 10};\n ASSERT_TRUE(trivially_destructible.has_value());\n ASSERT_TRUE(non_trivially_destructible.has_value());\n}\n###" }, { "cuda": "\n#include \n\n#include \n#include \n#include \n\n#include \n\nusing namespace at;\n\n__global__ void test_tensor_packed_accessor_kernel(\n PackedTensorAccessor64 resa,\n PackedTensorAccessor64 t1a,\n PackedTensorAccessor64 t2a) {\n for (int64_t i = 0; i < resa.size(0); i++) {\n float val = 0.0f;\n for (int64_t j = 0; j < t1a.size(1); j++) {\n val += t1a[i][j] * t2a[j];\n }\n resa[i] = val;\n }\n}\n\n// test GenericPackedTensorAccessor and Tensor.generic_packed_accessor\nTEST(PackedtensoraccessorTest, PackedtensoraccessorTestCUDA) {\n if (!at::cuda::is_available()) return;\n manual_seed(123);\n\n Tensor t1 = rand({4, 4}, CUDA(kFloat));\n Tensor t2 = rand({4}, CUDA(kFloat));\n Tensor res = empty({4}, CUDA(kFloat));\n\n auto t1a = t1.packed_accessor64();\n auto t2a = t2.packed_accessor64();\n auto resa = res.packed_accessor64();\n\n auto stream = at::cuda::getCurrentCUDAStream();\n\n test_tensor_packed_accessor_kernel<<<1, 1, 0, stream>>>(resa, t1a, t2a);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n ASSERT_TRUE(cudaSuccess == cudaDeviceSynchronize());\n\n auto expected = mv(t1, t2);\n\n ASSERT_TRUE(res.allclose(expected));\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n\n#include \n#include \n#include \n\n#include \n\nusing namespace at;\n\n__global__ void test_tensor_packed_accessor_kernel(\n PackedTensorAccessor64 resa,\n PackedTensorAccessor64 t1a,\n PackedTensorAccessor64 t2a) {\n for (int64_t i = 0; i < resa.size(0); i++) {\n float val = 0.0f;\n for (int64_t j = 0; j < t1a.size(1); j++) {\n val += t1a[i][j] * t2a[j];\n }\n resa[i] = val;\n }\n}\n\n// test GenericPackedTensorAccessor and Tensor.generic_packed_accessor\nTEST(PackedtensoraccessorTest, PackedtensoraccessorTestHIP) {\n if (!at::cuda::is_available()) return;\n manual_seed(123);\n\n Tensor t1 = rand({4, 4}, HIP(kFloat));\n Tensor t2 = rand({4}, HIP(kFloat));\n Tensor res = empty({4}, HIP(kFloat));\n\n auto t1a = t1.packed_accessor64();\n auto t2a = t2.packed_accessor64();\n auto resa = res.packed_accessor64();\n\n auto stream = at::hip::getCurrentHIPStream();\n\n hipLaunchKernelGGL(( test_tensor_packed_accessor_kernel), dim3(1), dim3(1), 0, stream, resa, t1a, t2a);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n ASSERT_TRUE(hipSuccess == hipDeviceSynchronize());\n\n auto expected = mv(t1, t2);\n\n ASSERT_TRUE(res.allclose(expected));\n}\n###" }, { "cuda": "\n#pragma once\n// TODO: Remove once torchvision has been updated to use the ATen header\n#include \n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n// TODO: Remove once torchvision has been updated to use the ATen header\n#include \n###" }, { "cuda": "\n#pragma once\n// TODO: Remove this header\n#include \n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n// TODO: Remove this header\n#include \n###" }, { "cuda": "\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \n\n#include \n#include \n\n#include \"c10/util/Flags.h\"\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/init.h\"\n#include \"caffe2/core/logging.h\"\n\nusing std::vector;\n\nC10_DECLARE_int(caffe2_log_level);\n\nint main(int argc, char** argv) {\n caffe2::GlobalInit(&argc, &argv);\n c10::SetUsageMessage(\n \"Inspects the GPUs on the current machine and prints out their details \"\n \"provided by cuda.\");\n\n int gpu_count;\n CUDA_ENFORCE(cudaGetDeviceCount(&gpu_count));\n for (int i = 0; i < gpu_count; ++i) {\n LOG(INFO) << \"Querying device ID = \" << i;\n caffe2::DeviceQuery(i);\n }\n\n vector > access_pattern;\n CAFFE_ENFORCE(caffe2::GetCudaPeerAccessPattern(&access_pattern));\n\n std::stringstream sstream;\n // Find topology\n for (int i = 0; i < gpu_count; ++i) {\n for (int j = 0; j < gpu_count; ++j) {\n sstream << (access_pattern[i][j] ? \"+\" : \"-\") << \" \";\n }\n sstream << std::endl;\n }\n LOG(INFO) << \"Access pattern: \" << std::endl << sstream.str();\n\n return 0;\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \n\n#include \n#include \n\n#include \"c10/util/Flags.h\"\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/init.h\"\n#include \"caffe2/core/logging.h\"\n\nusing std::vector;\n\nC10_DECLARE_int(caffe2_log_level);\n\nint main(int argc, char** argv) {\n caffe2::GlobalInit(&argc, &argv);\n c10::SetUsageMessage(\n \"Inspects the GPUs on the current machine and prints out their details \"\n \"provided by cuda.\");\n\n int gpu_count;\n HIP_ENFORCE(hipGetDeviceCount(&gpu_count));\n for (int i = 0; i < gpu_count; ++i) {\n LOG(INFO) << \"Querying device ID = \" << i;\n caffe2::DeviceQuery(i);\n }\n\n vector > access_pattern;\n CAFFE_ENFORCE(caffe2::GetHipPeerAccessPattern(&access_pattern));\n\n std::stringstream sstream;\n // Find topology\n for (int i = 0; i < gpu_count; ++i) {\n for (int j = 0; j < gpu_count; ++j) {\n sstream << (access_pattern[i][j] ? \"+\" : \"-\") << \" \";\n }\n sstream << std::endl;\n }\n LOG(INFO) << \"Access pattern: \" << std::endl << sstream.str();\n\n return 0;\n}\n###" }, { "cuda": "\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/proto/caffe2_pb.h\"\n\n#define PRINT_SIZE(cls) \\\n std::cout << \"Size of \" #cls \": \" << sizeof(cls) << \" bytes.\" \\\n << std::endl;\n\nint main(int /* unused */, char** /* unused */) {\n PRINT_SIZE(caffe2::Blob);\n PRINT_SIZE(caffe2::Tensor);\n PRINT_SIZE(caffe2::CPUContext);\n PRINT_SIZE(caffe2::CUDAContext);\n PRINT_SIZE(caffe2::OperatorBase);\n PRINT_SIZE(caffe2::OperatorDef);\n PRINT_SIZE(caffe2::Operator);\n PRINT_SIZE(caffe2::Operator);\n PRINT_SIZE(caffe2::TypeMeta);\n PRINT_SIZE(caffe2::Workspace);\n return 0;\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/proto/caffe2_pb.h\"\n\n#define PRINT_SIZE(cls) \\\n std::cout << \"Size of \" #cls \": \" << sizeof(cls) << \" bytes.\" \\\n << std::endl;\n\nint main(int /* unused */, char** /* unused */) {\n PRINT_SIZE(caffe2::Blob);\n PRINT_SIZE(caffe2::Tensor);\n PRINT_SIZE(caffe2::CPUContext);\n PRINT_SIZE(caffe2::HIPContext);\n PRINT_SIZE(caffe2::OperatorBase);\n PRINT_SIZE(caffe2::OperatorDef);\n PRINT_SIZE(caffe2::Operator);\n PRINT_SIZE(caffe2::Operator);\n PRINT_SIZE(caffe2::TypeMeta);\n PRINT_SIZE(caffe2::Workspace);\n return 0;\n}\n###" }, { "cuda": "\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing ::testing::HasSubstr;\nvoid did_not_fail_diagnostics() {\n std::cerr\n << \"c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = \"\n << c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime\n << std::endl;\n std::cerr\n << \"c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time = \"\n << c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time\n << std::endl;\n std::cerr\n << \"c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().do_all_devices_support_managed_memory = \"\n << c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref()\n .do_all_devices_support_managed_memory\n << std::endl;\n}\n\n__global__ void cuda_always_fail_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\nvoid cuda_device_assertions_1_var_test() {\n const auto stream = c10::cuda::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n cuda_always_fail_assertion_kernel, 1, 1, 0, stream, 1);\n try {\n c10::cuda::device_synchronize();\n did_not_fail_diagnostics();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\"CUDA device-side assertion failures were found on GPU #0!\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"Thread ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Block ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Function containing kernel launch = \" +\n std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\nTEST(CUDATest, 
cuda_device_assertions_1_var_test) {\n#ifdef TORCH_USE_CUDA_DSA\n c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n did_not_fail_diagnostics();\n cuda_device_assertions_1_var_test();\n#else\n GTEST_SKIP() << \"CUDA device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing ::testing::HasSubstr;\nvoid did_not_fail_diagnostics() {\n std::cerr\n << \"c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = \"\n << c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime\n << std::endl;\n std::cerr\n << \"c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time = \"\n << c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time\n << std::endl;\n std::cerr\n << \"c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().do_all_devices_support_managed_memory = \"\n << c10::hip::HIPKernelLaunchRegistry::get_singleton_ref()\n .do_all_devices_support_managed_memory\n << std::endl;\n}\n\n__global__ void hip_always_fail_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\nvoid hip_device_assertions_1_var_test() {\n const auto stream = c10::hip::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n hip_always_fail_assertion_kernel, 1, 1, 0, stream, 1);\n try {\n c10::hip::device_synchronize();\n did_not_fail_diagnostics();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\"HIP device-side assertion failures were found on GPU #0!\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"Thread ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Block ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Name of kernel launched that led to failure = hip_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Function containing kernel launch = \" +\n std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\nTEST(HIPTest, hip_device_assertions_1_var_test) {\n#ifdef TORCH_USE_HIP_DSA\n c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n did_not_fail_diagnostics();\n hip_device_assertions_1_var_test();\n#else\n GTEST_SKIP() << \"HIP device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate\nstruct AbsFunctor {\n __device__ __forceinline__ scalar_t operator() (const scalar_t a) const {\n return std::abs(a);\n }\n};\n\nCONSTEXPR_EXCEPT_WIN_CUDA char abs_name[] = \"abs_kernel\";\nvoid abs_kernel_cuda(TensorIteratorBase& iter) {\n auto dtype = iter.dtype();\n if (at::isComplexType(dtype)) {\n#if AT_USE_JITERATOR()\n static const auto abs_string = jiterator_stringify(\n template T abs_kernel(T x) { return std::abs(x); });\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, \"abs_cuda\", [&]() {\n jitted_gpu_kernel<\n /*name=*/abs_name,\n 
/*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, abs_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, \"abs_cuda\", [&]() {\n using opmath_t = at::opmath_type;\n gpu_kernel(iter, AbsFunctor());\n });\n#endif\n } else {\n AT_DISPATCH_ALL_TYPES_AND3(\n ScalarType::Half,\n ScalarType::BFloat16,\n ScalarType::Bool,\n iter.dtype(),\n \"abs_cuda\",\n [&]() { gpu_kernel(iter, AbsFunctor()); });\n }\n}\n\n REGISTER_DISPATCH(abs_stub, &abs_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate\nstruct AbsFunctor {\n __device__ __forceinline__ scalar_t operator() (const scalar_t a) const {\n return std::abs(a);\n }\n};\n\nCONSTEXPR_EXCEPT_WIN_HIP char abs_name[] = \"abs_kernel\";\nvoid abs_kernel_hip(TensorIteratorBase& iter) {\n auto dtype = iter.dtype();\n if (at::isComplexType(dtype)) {\n#if AT_USE_JITERATOR()\n static const auto abs_string = jiterator_stringify(\n template T abs_kernel(T x) { return std::abs(x); });\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, \"abs_hip\", [&]() {\n jitted_gpu_kernel<\n /*name=*/abs_name,\n /*return_dtype=*/scalar_t,\n /*common_dtype=*/scalar_t,\n /*arity=*/1>(iter, abs_string);\n });\n#else\n AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, \"abs_hip\", [&]() {\n using opmath_t = at::opmath_type;\n gpu_kernel(iter, AbsFunctor());\n });\n#endif\n } else {\n AT_DISPATCH_ALL_TYPES_AND3(\n ScalarType::Half,\n ScalarType::BFloat16,\n ScalarType::Bool,\n iter.dtype(),\n \"abs_hip\",\n [&]() { gpu_kernel(iter, AbsFunctor()); });\n }\n}\n\n REGISTER_DISPATCH(abs_stub, &abs_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing ::testing::HasSubstr;\n\n__global__ void cuda_multiple_vars_always_fail_assertion_kernel(\n const int a, const int b, const int c, const int d, TORCH_DSA_KERNEL_ARGS) {\n int i = a + b + c + d;\n if (i != 0) {\n CUDA_KERNEL_ASSERT2(i == -i);\n } else {\n CUDA_KERNEL_ASSERT2(i == i + 1);\n }\n}\n\n__global__ void cuda_always_fail_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\nvoid cuda_device_assertions_catches_stream() {\n const auto stream = c10::cuda::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n cuda_multiple_vars_always_fail_assertion_kernel, 1, 1, 0, stream, 1, 2, 3, 4 \n );\n try {\n c10::cuda::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\"# of GPUs this process interacted with = 1\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"CUDA device-side assertion failures were found on GPU #0!\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"Thread ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Block ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Name of kernel launched that led to failure = cuda_multiple_vars_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Function containing kernel launch = \" +\n 
std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\nTEST(CUDATest, cuda_device_assertions_catches_stream) {\n#ifdef TORCH_USE_CUDA_DSA\n c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n cuda_device_assertions_catches_stream();\n#else\n GTEST_SKIP() << \"CUDA device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing ::testing::HasSubstr;\n\n__global__ void hip_multiple_vars_always_fail_assertion_kernel(\n const int a, const int b, const int c, const int d, TORCH_DSA_KERNEL_ARGS) {\n int i = a + b + c + d;\n if (i != 0) {\n CUDA_KERNEL_ASSERT2(i == -i);\n } else {\n CUDA_KERNEL_ASSERT2(i == i + 1);\n }\n}\n\n__global__ void hip_always_fail_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\nvoid hip_device_assertions_catches_stream() {\n const auto stream = c10::hip::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n hip_multiple_vars_always_fail_assertion_kernel, 1, 1, 0, stream, 1, 2, 3, 4 \n );\n try {\n c10::hip::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\"# of GPUs this process interacted with = 1\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"HIP device-side assertion failures were found on GPU #0!\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"Thread ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Block ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Name of kernel launched that led to failure = hip_multiple_vars_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Function containing kernel launch = \" +\n std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str, HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\nTEST(HIPTest, hip_device_assertions_catches_stream) {\n#ifdef TORCH_USE_HIP_DSA\n c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n hip_device_assertions_catches_stream();\n#else\n GTEST_SKIP() << \"HIP device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}###" }, { "cuda": "\n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing ::testing::HasSubstr;\n\n/**\n * Device kernel that takes 2 arguments\n * @param bad_thread represents the thread we want to trigger assertion on.\n * @param bad_block represents the block we want to trigger assertion on.\n * This kernel will only trigger a device side assertion for <> pair. all the other blocks and threads pairs will basically be\n * no-op.\n */\n__global__ void cuda_device_assertions_fail_on_thread_block_kernel(\n const int bad_thread,\n const int bad_block,\n TORCH_DSA_KERNEL_ARGS) {\n if (threadIdx.x == bad_thread && blockIdx.x == bad_block) {\n CUDA_KERNEL_ASSERT2(false); // This comparison necessarily needs to fail\n }\n}\n\n/**\n * TEST: Triggering device side assertion on only 1 thread from <<<1024,128>>>\n * grid. 
kernel used is unique, it take 2 parameters to tell which particular\n * block and thread it should assert, all the other threads of the kernel will\n * be basically no-op.\n */\nvoid cuda_device_assertions_catches_thread_and_block_and_device() {\n const auto stream = c10::cuda::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n cuda_device_assertions_fail_on_thread_block_kernel,\n 1024, /* Blocks */\n 128, /* Threads */\n 0, /* Shared mem */\n stream, /* Stream */\n 29, /* bad thread */\n 937 /* bad block */\n );\n\n try {\n c10::cuda::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\"Thread ID that failed assertion = [29,0,0]\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"Block ID that failed assertion = [937,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Name of kernel launched that led to failure = cuda_device_assertions_fail_on_thread_block_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Function containing kernel launch = \" +\n std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\n\nTEST(CUDATest, cuda_device_assertions_catches_thread_and_block_and_device) {\n#ifdef TORCH_USE_CUDA_DSA\n c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n cuda_device_assertions_catches_thread_and_block_and_device();\n#else\n GTEST_SKIP() << \"CUDA device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing ::testing::HasSubstr;\n\n/**\n * Device kernel that takes 2 arguments\n * @param bad_thread represents the thread we want to trigger assertion on.\n * @param bad_block represents the block we want to trigger assertion on.\n * This kernel will only trigger a device side assertion for <> pair. all the other blocks and threads pairs will basically be\n * no-op.\n */\n__global__ void hip_device_assertions_fail_on_thread_block_kernel(\n const int bad_thread,\n const int bad_block,\n TORCH_DSA_KERNEL_ARGS) {\n if (threadIdx.x == bad_thread && blockIdx.x == bad_block) {\n CUDA_KERNEL_ASSERT2(false); // This comparison necessarily needs to fail\n }\n}\n\n/**\n * TEST: Triggering device side assertion on only 1 thread from <<<1024,128>>>\n * grid. 
kernel used is unique, it take 2 parameters to tell which particular\n * block and thread it should assert, all the other threads of the kernel will\n * be basically no-op.\n */\nvoid hip_device_assertions_catches_thread_and_block_and_device() {\n const auto stream = c10::hip::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n hip_device_assertions_fail_on_thread_block_kernel,\n 1024, /* Blocks */\n 128, /* Threads */\n 0, /* Shared mem */\n stream, /* Stream */\n 29, /* bad thread */\n 937 /* bad block */\n );\n\n try {\n c10::hip::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\"Thread ID that failed assertion = [29,0,0]\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"Block ID that failed assertion = [937,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Name of kernel launched that led to failure = hip_device_assertions_fail_on_thread_block_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Function containing kernel launch = \" +\n std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\n\nTEST(HIPTest, hip_device_assertions_catches_thread_and_block_and_device) {\n#ifdef TORCH_USE_HIP_DSA\n c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n hip_device_assertions_catches_thread_and_block_and_device();\n#else\n GTEST_SKIP() << \"HIP device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n###" }, { "cuda": "\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing ::testing::HasSubstr;\nconst auto max_assertions_failure_str =\n \"Assertion failure \" + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);\n\n__global__ void cuda_always_fail_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\n__global__ void cuda_always_succeed_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a == a);\n}\n\n#ifndef _MSC_VER\n\nvoid cuda_device_assertions_from_2_processes() {\n const auto n1 = fork();\n if (n1 == 0) {\n \n \n \n TORCH_DSA_KERNEL_LAUNCH(\n cuda_always_fail_assertion_kernel, 1, 1, 0, c10::cuda::getStreamFromPool(), 1);\n try {\n c10::cuda::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\n \"1 CUDA device-side assertion failures were found on GPU #0!\"));\n }\n \n std::this_thread::sleep_for(std::chrono::milliseconds(3000));\n } else {\n \n \n \n std::this_thread::sleep_for(std::chrono::milliseconds(2000));\n TORCH_DSA_KERNEL_LAUNCH(\n cuda_always_succeed_assertion_kernel, 1, 1, 0, c10::cuda::getStreamFromPool(), 1);\n try {\n c10::cuda::device_synchronize();\n } catch (const c10::Error& err) {\n ASSERT_TRUE(false); \n }\n \n exit(0);\n }\n}\nTEST(CUDATest, cuda_device_assertions_from_2_processes) {\n#ifdef TORCH_USE_CUDA_DSA\n c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n cuda_device_assertions_from_2_processes();\n#else\n GTEST_SKIP() << \"CUDA device-side assertions (DSA) was not enabled at compile 
time.\";\n#endif\n}\n#else\n#endif\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nusing ::testing::HasSubstr;\nconst auto max_assertions_failure_str =\n \"Assertion failure \" + std::to_string(C10_HIP_DSA_ASSERTION_COUNT - 1);\n\n__global__ void hip_always_fail_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\n__global__ void hip_always_succeed_assertion_kernel(\n const int a, TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a == a);\n}\n\n#ifndef _MSC_VER\n\nvoid hip_device_assertions_from_2_processes() {\n const auto n1 = fork();\n if (n1 == 0) {\n \n \n \n TORCH_DSA_KERNEL_LAUNCH(\n hip_always_fail_assertion_kernel, 1, 1, 0, c10::hip::getStreamFromPool(), 1);\n try {\n c10::hip::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(\n err_str, HasSubstr(\n \"1 HIP device-side assertion failures were found on GPU #0!\"));\n }\n \n std::this_thread::sleep_for(std::chrono::milliseconds(3000));\n } else {\n \n \n \n std::this_thread::sleep_for(std::chrono::milliseconds(2000));\n TORCH_DSA_KERNEL_LAUNCH(\n hip_always_succeed_assertion_kernel, 1, 1, 0, c10::hip::getStreamFromPool(), 1);\n try {\n c10::hip::device_synchronize();\n } catch (const c10::Error& err) {\n ASSERT_TRUE(false); \n }\n \n exit(0);\n }\n}\nTEST(HIPTest, hip_device_assertions_from_2_processes) {\n#ifdef TORCH_USE_HIP_DSA\n c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n hip_device_assertions_from_2_processes();\n#else\n GTEST_SKIP() << \"HIP device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n#else\n#endif###" }, { "cuda": "\n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing ::testing::HasSubstr;\n\nconst auto max_assertions_failure_str =\n \"Assertion failure \" + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);\n\n/**\n * Device kernel that takes a single integer parameter as argument and\n * will always trigger a device side assertion.\n */\n__global__ void cuda_always_fail_assertion_kernel(\n const int a,\n TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\n/**\n * TEST: Triggering device side assertion from multiple block but single thread\n * <<<10,128>>>. 
Here we are triggering assertion on 10 blocks, each with only\n * 128 thread.\n */\nvoid cuda_device_assertions_multiple_writes_from_blocks_and_threads() {\n bool run_threads = false;\n\n // Create a function to launch kernel that waits for a signal, to try to\n // ensure everything is happening simultaneously\n const auto launch_the_kernel = [&]() {\n // Busy loop waiting for the signal to go\n while (!run_threads) {\n }\n\n TORCH_DSA_KERNEL_LAUNCH(\n cuda_always_fail_assertion_kernel,\n 10, /* Blocks */\n 128, /* Threads */\n 0, /* Shared mem */\n c10::cuda::getCurrentCUDAStream(), /* Stream */\n 1);\n };\n\n // Spin up a bunch of busy-looping threads\n std::vector threads;\n for (int i = 0; i < 10; i++) {\n threads.emplace_back(launch_the_kernel);\n }\n\n // Paranoid - wait for all the threads to get setup\n std::this_thread::sleep_for(std::chrono::milliseconds(100));\n\n // Mash\n run_threads = true;\n\n // Clean-up\n for (auto& x : threads) {\n x.join();\n }\n\n try {\n c10::cuda::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n }\n}\n\nTEST(CUDATest, cuda_device_assertions_multiple_writes_from_blocks_and_threads) {\n#ifdef TORCH_USE_CUDA_DSA\n c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n cuda_device_assertions_multiple_writes_from_blocks_and_threads();\n#else\n GTEST_SKIP() << \"CUDA device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing ::testing::HasSubstr;\n\nconst auto max_assertions_failure_str =\n \"Assertion failure \" + std::to_string(C10_HIP_DSA_ASSERTION_COUNT - 1);\n\n/**\n * Device kernel that takes a single integer parameter as argument and\n * will always trigger a device side assertion.\n */\n__global__ void hip_always_fail_assertion_kernel(\n const int a,\n TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\n/**\n * TEST: Triggering device side assertion from multiple block but single thread\n * <<<10,128>>>. 
Here we are triggering assertion on 10 blocks, each with only\n * 128 thread.\n */\nvoid hip_device_assertions_multiple_writes_from_blocks_and_threads() {\n bool run_threads = false;\n\n // Create a function to launch kernel that waits for a signal, to try to\n // ensure everything is happening simultaneously\n const auto launch_the_kernel = [&]() {\n // Busy loop waiting for the signal to go\n while (!run_threads) {\n }\n\n TORCH_DSA_KERNEL_LAUNCH(\n hip_always_fail_assertion_kernel,\n 10, /* Blocks */\n 128, /* Threads */\n 0, /* Shared mem */\n c10::hip::getCurrentHIPStream(), /* Stream */\n 1);\n };\n\n // Spin up a bunch of busy-looping threads\n std::vector threads;\n for (int i = 0; i < 10; i++) {\n threads.emplace_back(launch_the_kernel);\n }\n\n // Paranoid - wait for all the threads to get setup\n std::this_thread::sleep_for(std::chrono::milliseconds(100));\n\n // Mash\n run_threads = true;\n\n // Clean-up\n for (auto& x : threads) {\n x.join();\n }\n\n try {\n c10::hip::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Name of kernel launched that led to failure = hip_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n }\n}\n\nTEST(HIPTest, hip_device_assertions_multiple_writes_from_blocks_and_threads) {\n#ifdef TORCH_USE_HIP_DSA\n c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n hip_device_assertions_multiple_writes_from_blocks_and_threads();\n#else\n GTEST_SKIP() << \"HIP device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n###" }, { "cuda": "\n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing ::testing::HasSubstr;\n\nconst auto max_assertions_failure_str =\n \"Assertion failure \" + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);\n\n/**\n * Device kernel that takes a single integer parameter as argument and\n * will always trigger a device side assertion.\n */\n__global__ void cuda_always_fail_assertion_kernel(\n const int a,\n TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\n/**\n * TEST: Triggering device side assertion from single block and multiple threads\n * <<<1,128>>>. 
Once the very first thread asserts all the other threads will\n * basically be in bad state and the block id with failed assertion would be\n * [0,0,0].\n */\nvoid cuda_device_assertions_multiple_writes_from_same_block() {\n const auto stream = c10::cuda::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n cuda_always_fail_assertion_kernel,\n 1, /* Blocks */\n 128, /* Threads */\n 0, /* Shared mem */\n stream, /* Stream */\n 1);\n\n try {\n c10::cuda::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));\n ASSERT_THAT(err_str, HasSubstr(\"Block ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Function containing kernel launch = \" +\n std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\n\nTEST(CUDATest, cuda_device_assertions_multiple_writes_from_same_block) {\n#ifdef TORCH_USE_CUDA_DSA\n c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n cuda_device_assertions_multiple_writes_from_same_block();\n#else\n GTEST_SKIP() << \"CUDA device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n#include \n\n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nusing ::testing::HasSubstr;\n\nconst auto max_assertions_failure_str =\n \"Assertion failure \" + std::to_string(C10_HIP_DSA_ASSERTION_COUNT - 1);\n\n/**\n * Device kernel that takes a single integer parameter as argument and\n * will always trigger a device side assertion.\n */\n__global__ void hip_always_fail_assertion_kernel(\n const int a,\n TORCH_DSA_KERNEL_ARGS) {\n CUDA_KERNEL_ASSERT2(a != a);\n}\n\n/**\n * TEST: Triggering device side assertion from single block and multiple threads\n * <<<1,128>>>. 
Once the very first thread asserts all the other threads will\n * basically be in bad state and the block id with failed assertion would be\n * [0,0,0].\n */\nvoid hip_device_assertions_multiple_writes_from_same_block() {\n const auto stream = c10::hip::getStreamFromPool();\n TORCH_DSA_KERNEL_LAUNCH(\n hip_always_fail_assertion_kernel,\n 1, /* Blocks */\n 128, /* Threads */\n 0, /* Shared mem */\n stream, /* Stream */\n 1);\n\n try {\n c10::hip::device_synchronize();\n throw std::runtime_error(\"Test didn't fail, but should have.\");\n } catch (const c10::Error& err) {\n const auto err_str = std::string(err.what());\n ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));\n ASSERT_THAT(err_str, HasSubstr(\"Block ID that failed assertion = [0,0,0]\"));\n ASSERT_THAT(err_str, HasSubstr(\"Device that launched kernel = 0\"));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Name of kernel launched that led to failure = hip_always_fail_assertion_kernel\"));\n ASSERT_THAT(\n err_str, HasSubstr(\"File containing kernel launch = \" __FILE__));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Function containing kernel launch = \" +\n std::string(__FUNCTION__)));\n ASSERT_THAT(\n err_str,\n HasSubstr(\n \"Stream kernel was launched on = \" + std::to_string(stream.id())));\n }\n}\n\nTEST(HIPTest, hip_device_assertions_multiple_writes_from_same_block) {\n#ifdef TORCH_USE_HIP_DSA\n c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;\n hip_device_assertions_multiple_writes_from_same_block();\n#else\n GTEST_SKIP() << \"HIP device-side assertions (DSA) was not enabled at compile time.\";\n#endif\n}\n###" }, { "cuda": "\n#include \"caffe2/contrib/aten/aten_op.h\"\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(ATen, ATenOp);\ntemplate<>\nat::Backend ATenOp::backend() const {\n return at::Backend::CUDA;\n}\n\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/contrib/aten/aten_op.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(ATen, ATenOp);\ntemplate<>\nat::Backend ATenOp::backend() const {\n return at::Backend::HIP;\n}\n\n}\n###" }, { "cuda": "\n#include \"caffe2/contrib/gloo/broadcast_ops.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\n#include \n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate \nvoid BroadcastOp::initializeAlgorithm() {\n if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::CudaBroadcastOneToAll(\n init_.context, init_.template getOutputs(), init_.size, root_));\n } else if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::CudaBroadcastOneToAll(\n init_.context, init_.template getOutputs(), init_.size, root_));\n } else if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::CudaBroadcastOneToAll(\n init_.context, init_.template getOutputs(), init_.size, root_));\n } else if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::CudaBroadcastOneToAll<::gloo::float16>(\n init_.context,\n init_.template getOutputs<::gloo::float16>(),\n init_.size,\n root_));\n } else {\n CAFFE_ENFORCE(false, \"Unhandled type: \", init_.meta.name());\n }\n}\n\nnamespace {\n\nREGISTER_CUDA_OPERATOR_WITH_ENGINE(Broadcast, GLOO, BroadcastOp);\n\n} // namespace\n} // namespace gloo\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/contrib/gloo/broadcast_ops.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\n#include \n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate \nvoid BroadcastOp::initializeAlgorithm() {\n if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::HipBroadcastOneToAll(\n init_.context, init_.template getOutputs(), init_.size, root_));\n } else if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::HipBroadcastOneToAll(\n init_.context, init_.template getOutputs(), init_.size, root_));\n } else if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::HipBroadcastOneToAll(\n init_.context, init_.template getOutputs(), init_.size, root_));\n } else if (init_.template IsType()) {\n algorithm_.reset(new ::gloo::HipBroadcastOneToAll<::gloo::float16>(\n init_.context,\n init_.template getOutputs<::gloo::float16>(),\n init_.size,\n root_));\n } else {\n CAFFE_ENFORCE(false, \"Unhandled type: \", init_.meta.name());\n }\n}\n\nnamespace {\n\nREGISTER_HIP_OPERATOR_WITH_ENGINE(Broadcast, GLOO, BroadcastOp);\n\n} // namespace\n} // namespace gloo\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/contrib/gloo/common_world_ops.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\n#include \n#include \n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate <>\nvoid CreateCommonWorld::initializeForContext() {\n static std::once_flag once;\n std::call_once(once, [&]() {\n // This is the first time we call Gloo code for a CUDAContext.\n // Share Caffe2 CUDA mutex with Gloo.\n ::gloo::CudaShared::setMutex(&CUDAContext::mutex());\n });\n}\n\nnamespace {\n\nREGISTER_CUDA_OPERATOR_WITH_ENGINE(\n CreateCommonWorld,\n GLOO,\n CreateCommonWorld);\n\nREGISTER_CUDA_OPERATOR_WITH_ENGINE(\n CloneCommonWorld,\n GLOO,\n CloneCommonWorld);\n\n} // namespace\n} // namespace gloo\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/contrib/gloo/common_world_ops.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\n#include \n#include \n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate <>\nvoid CreateCommonWorld::initializeForContext() {\n static std::once_flag once;\n std::call_once(once, [&]() {\n // This is the first time we call Gloo code for a HIPContext.\n // Share Caffe2 HIP mutex with Gloo.\n ::gloo::HipShared::setMutex(&HIPContext::mutex());\n });\n}\n\nnamespace {\n\nREGISTER_HIP_OPERATOR_WITH_ENGINE(\n CreateCommonWorld,\n GLOO,\n CreateCommonWorld);\n\nREGISTER_HIP_OPERATOR_WITH_ENGINE(\n CloneCommonWorld,\n GLOO,\n CloneCommonWorld);\n\n} // namespace\n} // namespace gloo\n} // namespace caffe2\n###" }, { "cuda": "\n#pragma once\n\n#include \n\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n\n#include \n#include \n\n#define NCCL_VERSION_MIN(major, minor, patch) \\\n ((NCCL_MAJOR > major) || \\\n ((NCCL_MAJOR == major) && \\\n ((NCCL_MINOR > minor) || \\\n ((NCCL_MINOR == minor) && (NCCL_PATCH >= patch)))))\n\nnamespace caffe2 {\nnamespace nccl {\n\n#define CAFFE_NCCL_CHECK(condition) \\\n do { \\\n ncclResult_t status = (condition); \\\n CAFFE_ENFORCE_EQ( \\\n status, \\\n ncclSuccess, \\\n \" \", \\\n \"Error at: \", \\\n __FILE__, \\\n __LINE__, \\\n \": \", \\\n ncclGetErrorString(status)); \\\n } while (0)\n\nstruct NCCLElement {\n const TensorCUDA* src{nullptr};\n TensorCUDA* dst{nullptr};\n int device{0};\n};\n\nstruct NCCLExecution {\n int stream_gpu_id{0};\n cudaStream_t stream{nullptr};\n std::vector elements;\n size_t root{0};\n};\n\n// Called when the last NCCL op is destructed and all lazily created\n// NCCLContext instances can safely be destroyed.\nvoid destroyContexts();\n\ntemplate \nclass NCCL {\n public:\n static void AllReduce(const NCCLExecution& ex);\n static void Broadcast(const NCCLExecution& ex);\n static void Reduce(const NCCLExecution& ex);\n static void AllGather(const NCCLExecution& ex);\n static void ReduceScatter(const NCCLExecution& ex);\n};\n\n} // namespace nccl\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#pragma once\n\n#include \n\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n\n#include \n#include \n\n#define NCCL_VERSION_MIN(major, minor, patch) \\\n ((NCCL_MAJOR > major) || \\\n ((NCCL_MAJOR == major) && \\\n ((NCCL_MINOR > minor) || \\\n ((NCCL_MINOR == minor) && (NCCL_PATCH >= patch)))))\n\nnamespace caffe2 {\nnamespace nccl {\n\n#define CAFFE_NCCL_CHECK(condition) \\\n do { \\\n ncclResult_t status = (condition); \\\n CAFFE_ENFORCE_EQ( \\\n status, \\\n ncclSuccess, \\\n \" \", \\\n \"Error at: \", \\\n __FILE__, \\\n __LINE__, \\\n \": \", \\\n ncclGetErrorString(status)); \\\n } while (0)\n\nstruct NCCLElement {\n const TensorHIP* src{nullptr};\n TensorHIP* dst{nullptr};\n int device{0};\n};\n\nstruct NCCLExecution {\n int stream_gpu_id{0};\n hipStream_t stream{nullptr};\n std::vector elements;\n size_t root{0};\n};\n\n// Called when the last NCCL op is destructed and all lazily created\n// NCCLContext instances can safely be destroyed.\nvoid destroyContexts();\n\ntemplate \nclass NCCL {\n public:\n static void AllReduce(const NCCLExecution& ex);\n static void Broadcast(const NCCLExecution& ex);\n static void Reduce(const NCCLExecution& ex);\n static void AllGather(const NCCLExecution& ex);\n static void ReduceScatter(const NCCLExecution& ex);\n};\n\n} // namespace nccl\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/blob_serialization.h\"\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\nREGISTER_BLOB_DESERIALIZER(TensorCUDA, TensorDeserializer);\n}\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/blob_serialization.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\nREGISTER_BLOB_DESERIALIZER(TensorHIP, TensorDeserializer);\n}\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid elu_kernel(\n TensorIteratorBase& iter,\n const Scalar& alpha,\n const Scalar& scale,\n const Scalar& input_scale) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"elu_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negcoef = alpha.to() * scale.to();\n auto poscoef = scale.to();\n auto negiptcoef = input_scale.to();\n gpu_kernel(\n iter,\n [negcoef, poscoef, negiptcoef] GPU_LAMBDA(scalar_t a) -> scalar_t {\n opmath_t aop = static_cast(a);\n return aop > 0 ? 
aop * poscoef\n : std::expm1(aop * negiptcoef) * negcoef;\n });\n });\n}\n\nvoid elu_backward_kernel(\n TensorIteratorBase& iter,\n const Scalar& alpha,\n const Scalar& scale,\n const Scalar& input_scale,\n bool is_result) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"elu_backward_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negcoef = alpha.to() * scale.to();\n auto poscoef = scale.to();\n auto negiptcoef = input_scale.to();\n gpu_kernel(\n iter,\n [negcoef, poscoef, negiptcoef, is_result] GPU_LAMBDA(\n scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n\n if (is_result) {\n return bop <= 0 ? aop * negiptcoef * (bop + negcoef)\n : aop * poscoef;\n } else {\n return bop <= 0\n ? aop * negiptcoef * negcoef * std::exp(bop * negiptcoef)\n : aop * poscoef;\n }\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(elu_stub, &elu_kernel);\nREGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid elu_kernel(\n TensorIteratorBase& iter,\n const Scalar& alpha,\n const Scalar& scale,\n const Scalar& input_scale) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"elu_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negcoef = alpha.to() * scale.to();\n auto poscoef = scale.to();\n auto negiptcoef = input_scale.to();\n gpu_kernel(\n iter,\n [negcoef, poscoef, negiptcoef] GPU_LAMBDA(scalar_t a) -> scalar_t {\n opmath_t aop = static_cast(a);\n return aop > 0 ? aop * poscoef\n : std::expm1(aop * negiptcoef) * negcoef;\n });\n });\n}\n\nvoid elu_backward_kernel(\n TensorIteratorBase& iter,\n const Scalar& alpha,\n const Scalar& scale,\n const Scalar& input_scale,\n bool is_result) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"elu_backward_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negcoef = alpha.to() * scale.to();\n auto poscoef = scale.to();\n auto negiptcoef = input_scale.to();\n gpu_kernel(\n iter,\n [negcoef, poscoef, negiptcoef, is_result] GPU_LAMBDA(\n scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n\n if (is_result) {\n return bop <= 0 ? aop * negiptcoef * (bop + negcoef)\n : aop * poscoef;\n } else {\n return bop <= 0\n ? 
aop * negiptcoef * negcoef * ::exp(bop * negiptcoef)\n : aop * poscoef;\n }\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(elu_stub, &elu_kernel);\nREGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/event.h\"\n\nnamespace caffe2 {\n\nTEST(EventCUDATest, EventBasics) {\n if (!HasCudaGPU())\n return;\n DeviceOption device_cpu;\n device_cpu.set_device_type(PROTO_CPU);\n DeviceOption device_cuda;\n device_cuda.set_device_type(PROTO_CUDA);\n\n CPUContext context_cpu(device_cpu);\n CUDAContext context_cuda(device_cuda);\n\n Event event_cpu(device_cpu);\n Event event_cuda(device_cuda);\n\n // CPU context and event interactions\n context_cpu.Record(&event_cpu);\n event_cpu.SetFinished();\n event_cpu.Finish();\n context_cpu.WaitEvent(event_cpu);\n\n event_cpu.Reset();\n event_cpu.Record(CPU, &context_cpu);\n event_cpu.SetFinished();\n event_cpu.Wait(CPU, &context_cpu);\n\n // CUDA context and event interactions\n context_cuda.SwitchToDevice();\n context_cuda.Record(&event_cuda);\n context_cuda.WaitEvent(event_cuda);\n event_cuda.Finish();\n\n event_cuda.Reset();\n event_cuda.Record(CUDA, &context_cuda);\n event_cuda.Wait(CUDA, &context_cuda);\n\n // CPU context waiting for CUDA event\n context_cpu.WaitEvent(event_cuda);\n\n // CUDA context waiting for CPU event\n context_cuda.WaitEvent(event_cpu);\n}\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/event.h\"\n\nnamespace caffe2 {\n\nTEST(EventHIPTest, EventBasics) {\n if (!HasHipGPU())\n return;\n DeviceOption device_cpu;\n device_cpu.set_device_type(PROTO_CPU);\n DeviceOption device_hip;\n device_hip.set_device_type(PROTO_HIP);\n\n CPUContext context_cpu(device_cpu);\n HIPContext context_hip(device_hip);\n\n Event event_cpu(device_cpu);\n Event event_hip(device_hip);\n\n // CPU context and event interactions\n context_cpu.Record(&event_cpu);\n event_cpu.SetFinished();\n event_cpu.Finish();\n context_cpu.WaitEvent(event_cpu);\n\n event_cpu.Reset();\n event_cpu.Record(CPU, &context_cpu);\n event_cpu.SetFinished();\n event_cpu.Wait(CPU, &context_cpu);\n\n // HIP context and event interactions\n context_hip.SwitchToDevice();\n context_hip.Record(&event_hip);\n context_hip.WaitEvent(event_hip);\n event_hip.Finish();\n\n event_hip.Reset();\n event_hip.Record(HIP, &context_hip);\n event_hip.Wait(HIP, &context_hip);\n\n // CPU context waiting for HIP event\n context_cpu.WaitEvent(event_hip);\n\n // HIP context waiting for CPU event\n context_hip.WaitEvent(event_cpu);\n}\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \n\n#include \n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\nclass JustTest : public OperatorBase {\n public:\n using OperatorBase::OperatorBase;\n bool Run(int /* unused */ /*stream_id*/) override {\n return true;\n }\n virtual std::string type() {\n return \"BASE\";\n }\n};\n\nclass JustTestCUDA : public JustTest {\n public:\n using JustTest::JustTest;\n bool Run(int /* unused */ /*stream_id*/) override {\n return true;\n }\n std::string type() override {\n return \"CUDA\";\n }\n};\n\nclass JustTestCUDNN : public JustTest {\n public:\n using JustTest::JustTest;\n bool Run(int /* unused */ /*stream_id*/) override {\n return true;\n }\n 
std::string type() override {\n return \"CUDNN\";\n }\n};\n\nOPERATOR_SCHEMA(JustTest).NumInputs(0, 1).NumOutputs(0, 1);\nREGISTER_CUDA_OPERATOR(JustTest, JustTestCUDA);\nREGISTER_CUDNN_OPERATOR(JustTest, JustTestCUDNN);\n\nTEST(EnginePrefTest, GPUDeviceDefaultPreferredEngines) {\n if (!HasCudaGPU())\n return;\n OperatorDef op_def;\n Workspace ws;\n op_def.mutable_device_option()->set_device_type(PROTO_CUDA);\n op_def.set_type(\"JustTest\");\n\n {\n const auto op = CreateOperator(op_def, &ws);\n EXPECT_NE(nullptr, op.get());\n // CUDNN should be taken as it's in the default global preferred engines\n // list\n EXPECT_EQ(static_cast(op.get())->type(), \"CUDNN\");\n }\n}\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\nclass JustTest : public OperatorBase {\n public:\n using OperatorBase::OperatorBase;\n bool Run(int /* unused */ /*stream_id*/) override {\n return true;\n }\n virtual std::string type() {\n return \"BASE\";\n }\n};\n\nclass JustTestHIP : public JustTest {\n public:\n using JustTest::JustTest;\n bool Run(int /* unused */ /*stream_id*/) override {\n return true;\n }\n std::string type() override {\n return \"HIP\";\n }\n};\n\nclass JustTestMIOPEN : public JustTest {\n public:\n using JustTest::JustTest;\n bool Run(int /* unused */ /*stream_id*/) override {\n return true;\n }\n std::string type() override {\n return \"MIOPEN\";\n }\n};\n\nOPERATOR_SCHEMA(JustTest).NumInputs(0, 1).NumOutputs(0, 1);\nREGISTER_HIP_OPERATOR(JustTest, JustTestHIP);\nREGISTER_MIOPEN_OPERATOR(JustTest, JustTestMIOPEN);\n\nTEST(EnginePrefTest, GPUDeviceDefaultPreferredEngines) {\n if (!HasHipGPU())\n return;\n OperatorDef op_def;\n Workspace ws;\n op_def.mutable_device_option()->set_device_type(PROTO_HIP);\n op_def.set_type(\"JustTest\");\n\n {\n const auto op = CreateOperator(op_def, &ws);\n EXPECT_NE(nullptr, op.get());\n // MIOPEN should be taken as it's in the default global preferred engines\n // list\n EXPECT_EQ(static_cast(op.get())->type(), \"MIOPEN\");\n }\n}\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/db/create_db_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(CreateDB, CreateDBOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/db/create_db_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(CreateDB, CreateDBOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/distributed/file_store_handler_op.h\"\n\n#if !defined(USE_ROCM)\n#include \n#else\n#include \n#endif\n\nnamespace caffe2 {\n\n#if !defined(USE_ROCM)\nREGISTER_CUDA_OPERATOR(\n FileStoreHandlerCreate,\n FileStoreHandlerCreateOp);\n#else\nREGISTER_HIP_OPERATOR(\n FileStoreHandlerCreate,\n FileStoreHandlerCreateOp);\n#endif\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/distributed/file_store_handler_op.h\"\n\n#if !defined(USE_ROCM)\n#include \n#else\n#include \n#endif\n\nnamespace caffe2 {\n\n#if !defined(USE_ROCM)\nREGISTER_HIP_OPERATOR(\n FileStoreHandlerCreate,\n FileStoreHandlerCreateOp);\n#else\nREGISTER_HIP_OPERATOR(\n FileStoreHandlerCreate,\n FileStoreHandlerCreateOp);\n#endif\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/distributed/redis_store_handler_op.h\"\n\n#if !defined(USE_ROCM)\n#include \n#else\n#include \n#endif\n\nnamespace caffe2 {\n\n#if !defined(USE_ROCM)\nREGISTER_CUDA_OPERATOR(\n RedisStoreHandlerCreate,\n RedisStoreHandlerCreateOp);\n#else\nREGISTER_HIP_OPERATOR(\n RedisStoreHandlerCreate,\n RedisStoreHandlerCreateOp);\n#endif\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/distributed/redis_store_handler_op.h\"\n\n#if !defined(USE_ROCM)\n#include \n#else\n#include \n#endif\n\nnamespace caffe2 {\n\n#if !defined(USE_ROCM)\nREGISTER_HIP_OPERATOR(\n RedisStoreHandlerCreate,\n RedisStoreHandlerCreateOp);\n#else\nREGISTER_HIP_OPERATOR(\n RedisStoreHandlerCreate,\n RedisStoreHandlerCreateOp);\n#endif\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/image/image_input_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool ImageInputOp::ApplyTransformOnGPU(\n const std::vector& dims,\n const c10::Device& type) {\n // GPU transform kernel allows explicitly setting output type\n if (output_type_ == TensorProto_DataType_FLOAT) {\n auto* image_output =\n OperatorBase::OutputTensor(0, dims, at::dtype().device(type));\n TransformOnGPU(\n prefetched_image_on_device_,\n image_output,\n mean_gpu_,\n std_gpu_,\n &context_);\n } else if (output_type_ == TensorProto_DataType_FLOAT16) {\n auto* image_output =\n OperatorBase::OutputTensor(0, dims, at::dtype().device(type));\n TransformOnGPU(\n prefetched_image_on_device_,\n image_output,\n mean_gpu_,\n std_gpu_,\n &context_);\n } else {\n return false;\n }\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(ImageInput, ImageInputOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/image/image_input_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool ImageInputOp::ApplyTransformOnGPU(\n const std::vector& dims,\n const c10::Device& type) {\n // GPU transform kernel allows explicitly setting output type\n if (output_type_ == TensorProto_DataType_FLOAT) {\n auto* image_output =\n OperatorBase::OutputTensor(0, dims, at::dtype().device(type));\n TransformOnGPU(\n prefetched_image_on_device_,\n image_output,\n mean_gpu_,\n std_gpu_,\n &context_);\n } else if (output_type_ == TensorProto_DataType_FLOAT16) {\n auto* image_output =\n OperatorBase::OutputTensor(0, dims, at::dtype().device(type));\n TransformOnGPU(\n prefetched_image_on_device_,\n image_output,\n mean_gpu_,\n std_gpu_,\n &context_);\n } else {\n return false;\n }\n return true;\n}\n\nREGISTER_HIP_OPERATOR(ImageInput, ImageInputOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/image/transform_gpu.h\"\n#include \"caffe2/utils/conversions.h\"\n\n/**\n *\n * Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved\n * Distributed under 2-clause BSD license; see accompanying LICENSE file\n *\n **/\n\nnamespace caffe2 {\n\nnamespace {\n\n// input in (int8, NHWC), output in (fp32, NCHW)\ntemplate \n__global__ void transform_kernel(\n const int C,\n const int H,\n const int W,\n const float* mean,\n const float* std,\n const In* in,\n Out* out) {\n const auto n = blockIdx.x;\n\n const auto nStride = C*H*W;\n\n // pointers to data for this image\n const In *const input_ptr = &in[n*nStride];\n Out *const output_ptr = &out[n*nStride];\n\n // either read or write uncoalesced - try reading\n for (int c=0; c < C; ++c) {\n for (int h=threadIdx.y; h < H; h += blockDim.y) {\n for (int w=threadIdx.x; w < W; w += blockDim.x) {\n const int in_idx = c + C*w + C*W*h; // HWC\n const int out_idx = c*H*W + h*W + w; // CHW\n\n output_ptr[out_idx] = convert::To(\n (convert::To(input_ptr[in_idx])-mean[c]) * std[c]);\n }\n }\n }\n}\n\n}\n\ntemplate \n\nbool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n Context* context) {\n const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);\n auto* input_data = X.template data();\n auto* output_data = Y->template mutable_data();\n\n transform_kernel<\n T_IN, T_OUT><<cuda_stream()>>>(\n C, H, W, mean.template data(), std.template data(),\n input_data, output_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n};\n\ntemplate bool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n CUDAContext* context);\n\ntemplate bool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n CUDAContext* context);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/image/transform_gpu.h\"\n#include \"caffe2/utils/conversions.h\"\n\n/**\n *\n * Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved\n * Distributed under 2-clause BSD license; see accompanying LICENSE file\n *\n **/\n\nnamespace caffe2 {\n\nnamespace {\n\n// input in (int8, NHWC), output in (fp32, NCHW)\ntemplate \n__global__ void transform_kernel(\n const int C,\n const int H,\n const int W,\n const float* mean,\n const float* std,\n const In* in,\n Out* out) {\n const auto n = blockIdx.x;\n\n const auto nStride = C*H*W;\n\n // pointers to data for this image\n const In *const input_ptr = &in[n*nStride];\n Out *const output_ptr = &out[n*nStride];\n\n // either read or write uncoalesced - try reading\n for (int c=0; c < C; ++c) {\n for (int h=threadIdx.y; h < H; h += blockDim.y) {\n for (int w=threadIdx.x; w < W; w += blockDim.x) {\n const int in_idx = c + C*w + C*W*h; // HWC\n const int out_idx = c*H*W + h*W + w; // CHW\n\n output_ptr[out_idx] = convert::To(\n (convert::To(input_ptr[in_idx])-mean[c]) * std[c]);\n }\n }\n }\n}\n\n}\n\ntemplate \n\nbool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n Context* context) {\n const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);\n auto* input_data = X.template data();\n auto* output_data = Y->template mutable_data();\n\n hipLaunchKernelGGL(( transform_kernel<\n T_IN, T_OUT>), dim3(N), dim3(dim3(16, 16)), 0, context->hip_stream(), \n C, H, W, mean.template data(), std.template data(),\n input_data, output_data);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n};\n\ntemplate bool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n HIPContext* context);\n\ntemplate bool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n HIPContext* context);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_\n#define CAFFE2_IMAGE_TRANSFORM_GPU_H_\n\n/**\n *\n * Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n **/\n\n#include \"caffe2/core/context.h\"\n\nnamespace caffe2 {\n\ntemplate \nbool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n Context* context);\n\n} // namespace caffe2\n\n#endif\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_\n#define CAFFE2_IMAGE_TRANSFORM_GPU_H_\n\n/**\n *\n * Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n **/\n\n#include \"caffe2/core/context.h\"\n\nnamespace caffe2 {\n\ntemplate \nbool TransformOnGPU(\n Tensor& X,\n Tensor* Y,\n Tensor& mean,\n Tensor& std,\n Context* context);\n\n} // namespace caffe2\n\n#endif\n###" }, { "cuda": "\n#include \"caffe2/operators/abs_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nAbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(X + i) == T(0)\n ? T(0)\n : (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i));\n#else\n dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? 
dY[i] : -dY[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AbsGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n AbsGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Abs,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AbsFunctor>);\nREGISTER_CUDA_OPERATOR(\n AbsGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AbsGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/abs_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nAbsGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(X + i) == T(0)\n ? T(0)\n : (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i));\n#else\n dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? dY[i] : -dY[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AbsGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( AbsGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Abs,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AbsFunctor>);\nREGISTER_HIP_OPERATOR(\n AbsGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AbsGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/accumulate_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(Accumulate, AccumulateOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/accumulate_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(Accumulate, AccumulateOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"hardshrink_cuda\",\n [&]() {\n auto lambd = value.to();\n gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"hardshrink_hip\",\n [&]() {\n auto lambd = value.to();\n gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/accuracy_op.h\"\n#include \"caffe2/utils/GpuAtomics.cuh\"\n#include \"caffe2/utils/math.h\"\n\n#include \"caffe2/utils/cub_namespace.cuh\"\n#include \n\nnamespace caffe2 {\n\nnamespace {\n__global__ void AccuracyKernel(\n const int N,\n const int D,\n const int top_k,\n const float* Xdata,\n const int* labelData,\n float* accuracy) {\n typedef cub::BlockReduce BlockReduce;\n __shared__ typename BlockReduce::TempStorage temp_storage;\n int correct = 0;\n for (int row = blockIdx.x; row < N; row += gridDim.x) {\n const int label = labelData[row];\n const float label_pred = Xdata[row * D + label];\n int ngt = 0;\n for (int col = threadIdx.x; col < D; col += blockDim.x) {\n const float pred = Xdata[row * D + col];\n if (pred > label_pred || (pred == label_pred && col <= label)) {\n ++ngt;\n }\n }\n ngt = BlockReduce(temp_storage).Sum(ngt);\n if (ngt <= top_k) {\n ++correct;\n }\n __syncthreads();\n }\n if (threadIdx.x == 0) {\n gpu_atomic_add(accuracy, static_cast(correct));\n }\n}\n\n__global__ void AccuracyDivideKernel(const int N, float* accuracy) {\n *accuracy /= N;\n}\n} // namespace\n\ntemplate <>\nbool AccuracyOp::RunOnDevice() {\n auto& X = Input(PREDICTION);\n auto& label = Input(LABEL);\n\n CAFFE_ENFORCE_EQ(X.dim(), 2);\n int N = X.dim32(0);\n int D = X.dim32(1);\n CAFFE_ENFORCE_EQ(label.dim(), 1);\n CAFFE_ENFORCE_EQ(label.dim32(0), N);\n auto* Y = Output(0, vector(), at::dtype());\n float* Ydata = Y->template mutable_data();\n math::Set(1, 0, Ydata, &context_);\n AccuracyKernel<<<\n std::min(CAFFE_MAXIMUM_NUM_BLOCKS, N),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n N, D, top_k_, X.data(), label.data(), Ydata);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n // This is going to be executed only in one single kernel. Not very beautiful,\n // but probably we have to do this?\n AccuracyDivideKernel<<<1, 1, 0, context_.cuda_stream()>>>(\n N, Ydata);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Accuracy, AccuracyOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/accuracy_op.h\"\n#include \"caffe2/utils/hip/GpuAtomics.cuh\"\n#include \"caffe2/utils/math.h\"\n\n#include \"caffe2/utils/cub_namespace.cuh\"\n#include \n\nnamespace caffe2 {\n\nnamespace {\n__global__ void AccuracyKernel(\n const int N,\n const int D,\n const int top_k,\n const float* Xdata,\n const int* labelData,\n float* accuracy) {\n typedef hipcub::BlockReduce BlockReduce;\n __shared__ typename BlockReduce::TempStorage temp_storage;\n int correct = 0;\n for (int row = blockIdx.x; row < N; row += gridDim.x) {\n const int label = labelData[row];\n const float label_pred = Xdata[row * D + label];\n int ngt = 0;\n for (int col = threadIdx.x; col < D; col += blockDim.x) {\n const float pred = Xdata[row * D + col];\n if (pred > label_pred || (pred == label_pred && col <= label)) {\n ++ngt;\n }\n }\n ngt = BlockReduce(temp_storage).Sum(ngt);\n if (ngt <= top_k) {\n ++correct;\n }\n __syncthreads();\n }\n if (threadIdx.x == 0) {\n gpu_atomic_add(accuracy, static_cast(correct));\n }\n}\n\n__global__ void AccuracyDivideKernel(const int N, float* accuracy) {\n *accuracy /= N;\n}\n} // namespace\n\ntemplate <>\nbool AccuracyOp::RunOnDevice() {\n auto& X = Input(PREDICTION);\n auto& label = Input(LABEL);\n\n CAFFE_ENFORCE_EQ(X.dim(), 2);\n int N = X.dim32(0);\n int D = X.dim32(1);\n CAFFE_ENFORCE_EQ(label.dim(), 1);\n CAFFE_ENFORCE_EQ(label.dim32(0), N);\n auto* Y = Output(0, vector(), at::dtype());\n float* Ydata = Y->template mutable_data();\n math::Set(1, 0, Ydata, &context_);\n hipLaunchKernelGGL(( AccuracyKernel), \n dim3(::min(CAFFE_MAXIMUM_NUM_BLOCKS, N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n N, D, top_k_, X.data(), label.data(), Ydata);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n // This is going to be executed only in one single kernel. Not very beautiful,\n // but probably we have to do this?\n hipLaunchKernelGGL(( AccuracyDivideKernel), dim3(1), dim3(1), 0, context_.hip_stream(), \n N, Ydata);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Accuracy, AccuracyOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/acos_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void AcosGradientCUDAKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = -__ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));\n#else\n dX[i] = -dY[i] * rsqrtf(1.0f - X[i] * X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AcosGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n AcosGradientCUDAKernel<<<\n CAFFE_GET_BLOCKS(size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context->cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Acos,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AcosFunctor>);\nREGISTER_CUDA_OPERATOR(\n AcosGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AcosGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/acos_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void AcosGradientHIPKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = -__ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));\n#else\n dX[i] = -dY[i] * rsqrtf(1.0f - X[i] * X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AcosGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( AcosGradientHIPKernel), \n dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Acos,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AcosFunctor>);\nREGISTER_HIP_OPERATOR(\n AcosGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AcosGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/alias_with_name.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(AliasWithName, AliasWithNameOp);\n\n} // namespace caffe2\n\nC10_EXPORT_CAFFE2_OP_TO_C10_CUDA(\n AliasWithName,\n caffe2::AliasWithNameOp);\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/alias_with_name.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(AliasWithName, AliasWithNameOp);\n\n} // namespace caffe2\n\nC10_EXPORT_CAFFE2_OP_TO_C10_HIP(\n AliasWithName,\n caffe2::AliasWithNameOp);\n###" }, { "cuda": "\n#include \"caffe2/operators/asin_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void AsinGradientCUDAKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));\n#else\n dX[i] = dY[i] * rsqrtf(1.0f - X[i] * X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AsinGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n AsinGradientCUDAKernel<<<\n CAFFE_GET_BLOCKS(size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context->cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Asin,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AsinFunctor>);\nREGISTER_CUDA_OPERATOR(\n AsinGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AsinGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/asin_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void AsinGradientHIPKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));\n#else\n dX[i] = dY[i] * rsqrtf(1.0f - X[i] * X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AsinGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( AsinGradientHIPKernel), \n dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Asin,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AsinFunctor>);\nREGISTER_HIP_OPERATOR(\n AsinGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AsinGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/assert_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(Assert, AssertOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/assert_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(Assert, AssertOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/async_net_barrier_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(AsyncNetBarrier, AsyncNetBarrierOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/async_net_barrier_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(AsyncNetBarrier, AsyncNetBarrierOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/atan_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nAtanGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / (T(1) + __ldg(X + i) * __ldg(X + i));\n#else\n dX[i] = dY[i] / (T(1) + X[i] * X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AtanGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n AtanGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Atan,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AtanFunctor>);\nREGISTER_CUDA_OPERATOR(\n AtanGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n AtanGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/atan_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nAtanGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / (T(1) + __ldg(X + i) * __ldg(X + i));\n#else\n dX[i] = dY[i] / (T(1) + X[i] * X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool AtanGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( AtanGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Atan,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AtanFunctor>);\nREGISTER_HIP_OPERATOR(\n AtanGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n AtanGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/batch_matmul_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool BatchMatMulOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\nREGISTER_CUDA_OPERATOR(BatchMatMul, BatchMatMulOp);\n\n\n#if !defined(USE_ROCM)\n\ntemplate <>\nbool BatchMatMulOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\nREGISTER_CUDA_OPERATOR_WITH_ENGINE(\n BatchMatMul,\n TENSORCORE,\n BatchMatMulOp);\n\n#endif\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/batch_matmul_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool BatchMatMulOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\nREGISTER_HIP_OPERATOR(BatchMatMul, BatchMatMulOp);\n\n\n#if !defined(USE_ROCM)\n\ntemplate <>\nbool BatchMatMulOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\nREGISTER_HIP_OPERATOR_WITH_ENGINE(\n BatchMatMul,\n TENSORCORE,\n BatchMatMulOp);\n\n#endif\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/bucketize_op.h\"\n\n#include \n#include \n\nnamespace caffe2 {\n\n__global__ void BucketizeOpKernel(\n const int N,\n const int M,\n const float* bounds,\n const float* X,\n int32_t* out) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n int32_t low = -1, high = M;\n while (high - low > 1) {\n const int32_t median = low + (high - low) / 2;\n if (bounds[median] < X[i]) {\n low = median;\n } else {\n high = median;\n }\n }\n out[i] = high;\n }\n}\n\ntemplate <>\nbool BucketizeOp::RunOnDevice() {\n auto& input = Input(X);\n CAFFE_ENFORCE_GE(input.dim(), 1);\n\n auto N = input.numel();\n auto* output = Output(INDICES, input.sizes(), at::dtype());\n const auto* input_data = input.template data();\n auto* output_data = output->template mutable_data();\n\n BucketizeOpKernel<<<\n CAFFE_GET_BLOCKS(N),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n N,\n boundaries_device_.numel(),\n boundaries_device_.data(),\n input_data,\n output_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n};\n\nREGISTER_CUDA_OPERATOR(Bucketize, BucketizeOp);\n} // namespace caffe2\n\nusing BucketizeCUDA = caffe2::BucketizeOp;\n\nC10_EXPORT_CAFFE2_OP_TO_C10_CUDA(\n Bucketize,\n BucketizeCUDA);\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/bucketize_op.h\"\n\n#include \n#include \n\nnamespace caffe2 {\n\n__global__ void BucketizeOpKernel(\n const int N,\n const int M,\n const float* bounds,\n const float* X,\n int32_t* out) {\n HIP_1D_KERNEL_LOOP(i, N) {\n int32_t low = -1, high = M;\n while (high - low > 1) {\n const int32_t median = low + (high - low) / 2;\n if (bounds[median] < X[i]) {\n low = median;\n } else {\n high = median;\n }\n }\n out[i] = high;\n }\n}\n\ntemplate <>\nbool BucketizeOp::RunOnDevice() {\n auto& input = Input(X);\n CAFFE_ENFORCE_GE(input.dim(), 1);\n\n auto N = input.numel();\n auto* output = Output(INDICES, input.sizes(), at::dtype());\n const auto* input_data = input.template data();\n auto* output_data = output->template mutable_data();\n\n hipLaunchKernelGGL(( BucketizeOpKernel), \n dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n N,\n boundaries_device_.numel(),\n boundaries_device_.data(),\n input_data,\n output_data);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n};\n\nREGISTER_HIP_OPERATOR(Bucketize, BucketizeOp);\n} // namespace caffe2\n\nusing BucketizeHIP = caffe2::BucketizeOp;\n\nC10_EXPORT_CAFFE2_OP_TO_C10_HIP(\n Bucketize,\n BucketizeHIP);\n###" }, { "cuda": "\n#include \"caffe2/operators/cbrt_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nCbrtGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / (__ldg(Y + i) * __ldg(Y + i) * T(3));\n#else\n dX[i] = dY[i] / (Y[i] * Y[i] * T(3));\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CbrtGradientFunctor::Forward(\n const std::vector& dY_dims,\n const std::vector& /* Y_dims */,\n const T* dY,\n const T* Y,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies());\n CbrtGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Cbrt,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n CbrtFunctor>);\nREGISTER_CUDA_OPERATOR(\n CbrtGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n CbrtGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/cbrt_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nCbrtGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / (__ldg(Y + i) * __ldg(Y + i) * T(3));\n#else\n dX[i] = dY[i] / (Y[i] * Y[i] * T(3));\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CbrtGradientFunctor::Forward(\n const std::vector& dY_dims,\n const std::vector& /* Y_dims */,\n const T* dY,\n const T* Y,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( CbrtGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Cbrt,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n CbrtFunctor>);\nREGISTER_HIP_OPERATOR(\n CbrtGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n CbrtGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardsigmoid_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"hardsigmoid_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t one_sixth(1.0f / 6.0f);\n const opmath_t three(3.0f);\n const opmath_t six(6.0f);\n gpu_kernel(\n iter,\n [zero, one_sixth, three, six] GPU_LAMBDA(\n scalar_t self_val) -> scalar_t {\n opmath_t x = static_cast(self_val);\n return std::min(std::max(x + three, zero), six) * one_sixth;\n });\n });\n}\n\nvoid hardsigmoid_backward_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"hardsigmoid_backward_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t three(3.0f);\n const opmath_t neg_three(-3.0f);\n const opmath_t one_sixth(1.0f / 6.0f);\n gpu_kernel(\n iter,\n [zero, three, neg_three, one_sixth] GPU_LAMBDA(\n scalar_t grad_val_, scalar_t self_val_) -> scalar_t {\n opmath_t grad_val = static_cast(grad_val_);\n opmath_t self_val = static_cast(self_val_);\n return (self_val > neg_three && self_val < three)\n ? grad_val * one_sixth\n : zero;\n });\n });\n}\n\n} // namespace\n\nREGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);\nREGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardsigmoid_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"hardsigmoid_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t one_sixth(1.0f / 6.0f);\n const opmath_t three(3.0f);\n const opmath_t six(6.0f);\n gpu_kernel(\n iter,\n [zero, one_sixth, three, six] GPU_LAMBDA(\n scalar_t self_val) -> scalar_t {\n opmath_t x = static_cast(self_val);\n return ::min(::max(x + three, zero), six) * one_sixth;\n });\n });\n}\n\nvoid hardsigmoid_backward_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"hardsigmoid_backward_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t three(3.0f);\n const opmath_t neg_three(-3.0f);\n const opmath_t one_sixth(1.0f / 6.0f);\n gpu_kernel(\n iter,\n [zero, three, neg_three, one_sixth] GPU_LAMBDA(\n scalar_t grad_val_, scalar_t self_val_) -> scalar_t {\n opmath_t grad_val = static_cast(grad_val_);\n opmath_t self_val = static_cast(self_val_);\n return (self_val > neg_three && self_val < three)\n ? grad_val * one_sixth\n : zero;\n });\n });\n}\n\n} // namespace\n\nREGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);\nREGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/ceil_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate \n__global__ void CeilKernel(const int N, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = std::ceil(X[i]);\n }\n}\n\ntemplate <>\nbool CeilOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n CeilKernel<<<\n CAFFE_GET_BLOCKS(X.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n X.numel(), X.data(), Y->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Ceil, CeilOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/ceil_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate \n__global__ void CeilKernel(const int N, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = ::ceil(X[i]);\n }\n}\n\ntemplate <>\nbool CeilOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( CeilKernel), \n dim3(CAFFE_GET_BLOCKS(X.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n X.numel(), X.data(), Y->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Ceil, CeilOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/clip_op.h\"\n\nnamespace caffe2 {\nnamespace {\n\ntemplate \n__device__ T cuda_min(T x, T y);\ntemplate \n__device__ T cuda_max(T x, T y);\ntemplate <>\n__device__ float cuda_min(float x, float y) { return fminf(x, y); }\ntemplate <>\n__device__ float cuda_max(float x, float y) { return fmaxf(x, y); }\n\n// Disabled since we don't use it right now.\n/*\ntemplate <>\n__device__ double cuda_min(double x, double y) { return fmin(x, y); }\ntemplate <>\n__device__ double cuda_max(double x, double y) { return fmax(x, y); }\n*/\n\n\ntemplate \n__global__ void ClipKernel(const int N, const T minval, const T maxval,\n const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = cuda_min(cuda_max(X[i], minval), maxval);\n }\n}\n\ntemplate \n__global__ void ClipGradientKernel(const int N, const T minval,\n const T maxval, const T* Y,\n const T* dY, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);\n }\n}\n} // namespace\n\ntemplate <>\nbool ClipOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GE(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n ClipKernel<<<\n CAFFE_GET_BLOCKS(X.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n X.numel(), min_, max_, X.data(), Y->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool ClipGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n CAFFE_ENFORCE_GE(Y.numel(), 0);\n CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n ClipGradientKernel<<<\n CAFFE_GET_BLOCKS(Y.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n Y.numel(),\n min_,\n max_,\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Clip, ClipOp);\nREGISTER_CUDA_OPERATOR(ClipGradient, ClipGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/clip_op.h\"\n\nnamespace caffe2 {\nnamespace {\n\ntemplate \n__device__ T hip_min(T x, T y);\ntemplate \n__device__ T hip_max(T x, T y);\ntemplate <>\n__device__ float hip_min(float x, float y) { return fminf(x, y); }\ntemplate <>\n__device__ float hip_max(float x, float y) { return fmaxf(x, y); }\n\n// Disabled since we don't use it right now.\n/*\ntemplate <>\n__device__ double hip_min(double x, double y) { return fmin(x, y); }\ntemplate <>\n__device__ double hip_max(double x, double y) { return fmax(x, y); }\n*/\n\n\ntemplate \n__global__ void ClipKernel(const int N, const T minval, const T maxval,\n const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = hip_min(hip_max(X[i], minval), maxval);\n }\n}\n\ntemplate \n__global__ void ClipGradientKernel(const int N, const T minval,\n const T maxval, const T* Y,\n const T* dY, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);\n }\n}\n} // namespace\n\ntemplate <>\nbool ClipOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GE(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( ClipKernel), \n dim3(CAFFE_GET_BLOCKS(X.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n X.numel(), min_, max_, X.data(), Y->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool ClipGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n CAFFE_ENFORCE_GE(Y.numel(), 0);\n CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n hipLaunchKernelGGL(( ClipGradientKernel), \n dim3(CAFFE_GET_BLOCKS(Y.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n Y.numel(),\n min_,\n max_,\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Clip, ClipOp);\nREGISTER_HIP_OPERATOR(ClipGradient, ClipGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/no_default_engine_op.h\"\n\nnamespace caffe2 {\n// Communication operators do not have default engines.\nREGISTER_CUDA_OPERATOR(CreateCommonWorld, NoDefaultEngineOp);\nREGISTER_CUDA_OPERATOR(CloneCommonWorld, NoDefaultEngineOp);\nREGISTER_CUDA_OPERATOR(Broadcast, NoDefaultEngineOp);\nREGISTER_CUDA_OPERATOR(Reduce, NoDefaultEngineOp);\nREGISTER_CUDA_OPERATOR(Allgather, NoDefaultEngineOp);\nREGISTER_CUDA_OPERATOR(Allreduce, NoDefaultEngineOp);\nREGISTER_CUDA_OPERATOR(SendTensor, NoDefaultEngineOp);\nREGISTER_CUDA_OPERATOR(ReceiveTensor, NoDefaultEngineOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/no_default_engine_op.h\"\n\nnamespace caffe2 {\n// Communication operators do not have default engines.\nREGISTER_HIP_OPERATOR(CreateCommonWorld, NoDefaultEngineOp);\nREGISTER_HIP_OPERATOR(CloneCommonWorld, NoDefaultEngineOp);\nREGISTER_HIP_OPERATOR(Broadcast, NoDefaultEngineOp);\nREGISTER_HIP_OPERATOR(Reduce, NoDefaultEngineOp);\nREGISTER_HIP_OPERATOR(Allgather, NoDefaultEngineOp);\nREGISTER_HIP_OPERATOR(Allreduce, NoDefaultEngineOp);\nREGISTER_HIP_OPERATOR(SendTensor, NoDefaultEngineOp);\nREGISTER_HIP_OPERATOR(ReceiveTensor, NoDefaultEngineOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/concat_split_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(Split, SplitOp);\nREGISTER_CUDA_OPERATOR(Concat, ConcatOp);\n\n// Backward compatibility settings\nREGISTER_CUDA_OPERATOR(DepthSplit, SplitOp);\nREGISTER_CUDA_OPERATOR(DepthConcat, ConcatOp);\n\nREGISTER_CUDA_OPERATOR(SplitByLengths, SplitByLengthsOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/concat_split_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(Split, SplitOp);\nREGISTER_HIP_OPERATOR(Concat, ConcatOp);\n\n// Backward compatibility settings\nREGISTER_HIP_OPERATOR(DepthSplit, SplitOp);\nREGISTER_HIP_OPERATOR(DepthConcat, ConcatOp);\n\nREGISTER_HIP_OPERATOR(SplitByLengths, SplitByLengthsOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/conv_op.h\"\n#include \"caffe2/operators/conv_op_impl.h\"\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(Conv, ConvOp);\nREGISTER_CUDA_OPERATOR(ConvGradient, ConvGradientOp);\n\nREGISTER_CUDA_OPERATOR(Conv1D, ConvOp);\nREGISTER_CUDA_OPERATOR(Conv1DGradient, ConvGradientOp);\n\nREGISTER_CUDA_OPERATOR(Conv2D, ConvOp);\nREGISTER_CUDA_OPERATOR(Conv2DGradient, ConvGradientOp);\n\nREGISTER_CUDA_OPERATOR(Conv3D, ConvOp);\nREGISTER_CUDA_OPERATOR(Conv3DGradient, ConvGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/conv_op.h\"\n#include \"caffe2/operators/conv_op_impl.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(Conv, ConvOp);\nREGISTER_HIP_OPERATOR(ConvGradient, ConvGradientOp);\n\nREGISTER_HIP_OPERATOR(Conv1D, ConvOp);\nREGISTER_HIP_OPERATOR(Conv1DGradient, ConvGradientOp);\n\nREGISTER_HIP_OPERATOR(Conv2D, ConvOp);\nREGISTER_HIP_OPERATOR(Conv2DGradient, ConvGradientOp);\n\nREGISTER_HIP_OPERATOR(Conv3D, ConvOp);\nREGISTER_HIP_OPERATOR(Conv3DGradient, ConvGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/conv_op_shared.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nvoid createSharedBuffer(Workspace* ws) {\n auto* mutexPtr = ws->CreateBlob(\"__CAFFE2_SHARED_CONV_BUFFER_CUDA_MUTEX__\")\n ->GetMutable>();\n mutexPtr->reset(new std::mutex());\n ws->CreateBlob(\"__CAFFE2_SHARED_CONV_BUFFER_CUDA__\");\n}\n\ntemplate <>\nvoid runWithSharedBuffer(\n Workspace* ws,\n std::function f) {\n auto* mutexBlob = ws->GetBlob(\"__CAFFE2_SHARED_CONV_BUFFER_CUDA_MUTEX__\");\n CAFFE_ENFORCE(mutexBlob, \"Must call createSharedBuffer() first\");\n\n auto* mutexPtr = mutexBlob->GetMutable>();\n std::lock_guard g(**mutexPtr);\n auto* buffer = BlobGetMutableTensor(\n ws->GetBlob(\"__CAFFE2_SHARED_CONV_BUFFER_CUDA__\"), CUDA);\n f(buffer);\n}\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/conv_op_shared.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nvoid createSharedBuffer(Workspace* ws) {\n auto* mutexPtr = ws->CreateBlob(\"__CAFFE2_SHARED_CONV_BUFFER_HIP_MUTEX__\")\n ->GetMutable>();\n mutexPtr->reset(new std::mutex());\n ws->CreateBlob(\"__CAFFE2_SHARED_CONV_BUFFER_HIP__\");\n}\n\ntemplate <>\nvoid runWithSharedBuffer(\n Workspace* ws,\n std::function f) {\n auto* mutexBlob = ws->GetBlob(\"__CAFFE2_SHARED_CONV_BUFFER_HIP_MUTEX__\");\n CAFFE_ENFORCE(mutexBlob, \"Must call createSharedBuffer() first\");\n\n auto* mutexPtr = mutexBlob->GetMutable>();\n std::lock_guard g(**mutexPtr);\n auto* buffer = BlobGetMutableTensor(\n ws->GetBlob(\"__CAFFE2_SHARED_CONV_BUFFER_HIP__\"), HIP);\n f(buffer);\n}\n}\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/conv_transpose_op.h\"\n#include \"caffe2/operators/conv_transpose_op_impl.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(ConvTranspose, ConvTransposeOp);\nREGISTER_CUDA_OPERATOR(\n ConvTransposeGradient,\n ConvTransposeGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/conv_transpose_op.h\"\n#include \"caffe2/operators/conv_transpose_op_impl.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(ConvTranspose, ConvTransposeOp);\nREGISTER_HIP_OPERATOR(\n ConvTransposeGradient,\n ConvTransposeGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/copy_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nclass CopyOnDeviceLikeOp\n : public Operator {\n public:\n template \n explicit CopyOnDeviceLikeOp(Args&&... args)\n : Operator(std::forward(args)...) 
{}\n USE_OPERATOR_FUNCTIONS(CUDAContext);\n\n bool RunOnDevice() override {\n auto& input = Input(0);\n auto* output = OperatorBase::Output(0, CUDA);\n CUDAContext context(GetGPUIDForPointer(Input(1).raw_data()));\n output->ResizeLike(input);\n context.template CopyItems(\n input.meta(),\n input.numel(),\n input.raw_data(),\n output->raw_mutable_data(input.meta()));\n return true;\n }\n};\n\n// From CPU, copy it to whatever the current context\nREGISTER_CUDA_OPERATOR(\n CopyFromCPUInput,\n CopyOp);\n\n// CopyGPUToCPU and CopyCPUToGPU should both be carried out in a cuda context,\n// since gpu code will be involved.\nREGISTER_CUDA_OPERATOR(\n CopyGPUToCPU,\n CopyOp);\nREGISTER_CUDA_OPERATOR(\n CopyCPUToGPU,\n CopyOp);\n// If we only specify Copy, we assume that it is a gpu to gpu copy - maybe\n// involving different GPUs.\nREGISTER_CUDA_OPERATOR(Copy, CopyOp);\n\nREGISTER_CUDA_OPERATOR(\n CopyOnDeviceLike,\n CopyOnDeviceLikeOp);\n} // namespace caffe2\n\nusing CopyGPUToCPU_CUDA = caffe2::\n CopyOp;\nusing CopyCPUToGPU_CUDA = caffe2::\n CopyOp;\n\nC10_EXPORT_CAFFE2_OP_TO_C10_CUDA(CopyGPUToCPU, CopyGPUToCPU_CUDA);\n\nC10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY(CopyCPUToGPU, CopyCPUToGPU_CUDA);\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/copy_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nclass CopyOnDeviceLikeOp\n : public Operator {\n public:\n template \n explicit CopyOnDeviceLikeOp(Args&&... args)\n : Operator(std::forward(args)...) {}\n USE_OPERATOR_FUNCTIONS(HIPContext);\n\n bool RunOnDevice() override {\n auto& input = Input(0);\n auto* output = OperatorBase::Output(0, HIP);\n HIPContext context(GetGPUIDForPointer(Input(1).raw_data()));\n output->ResizeLike(input);\n context.template CopyItems(\n input.meta(),\n input.numel(),\n input.raw_data(),\n output->raw_mutable_data(input.meta()));\n return true;\n }\n};\n\n// From CPU, copy it to whatever the current context\nREGISTER_HIP_OPERATOR(\n CopyFromCPUInput,\n CopyOp);\n\n// CopyGPUToCPU and CopyCPUToGPU should both be carried out in a cuda context,\n// since gpu code will be involved.\nREGISTER_HIP_OPERATOR(\n CopyGPUToCPU,\n CopyOp);\nREGISTER_HIP_OPERATOR(\n CopyCPUToGPU,\n CopyOp);\n// If we only specify Copy, we assume that it is a gpu to gpu copy - maybe\n// involving different GPUs.\nREGISTER_HIP_OPERATOR(Copy, CopyOp);\n\nREGISTER_HIP_OPERATOR(\n CopyOnDeviceLike,\n CopyOnDeviceLikeOp);\n} // namespace caffe2\n\nusing CopyGPUToCPU_HIP = caffe2::\n CopyOp;\nusing CopyCPUToGPU_HIP = caffe2::\n CopyOp;\n\nC10_EXPORT_CAFFE2_OP_TO_C10_HIP(CopyGPUToCPU, CopyGPUToCPU_HIP);\n\nC10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY(CopyCPUToGPU, CopyCPUToGPU_HIP);\n###" }, { "cuda": "\n#include \"caffe2/operators/cosh_op.h\"\n\n#include \n#include \"caffe2/core/context_gpu.h\"\n\n#include \n#include \n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void CoshGradientCUDAKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * sinhf(__ldg(X + i));\n#else\n dX[i] = dY[i] * sinhf(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CoshGradientFunctor::Forward(\n const std::vector& /* dY_dims */,\n const std::vector& X_dims,\n const T* dY,\n const T* X,\n T* dX,\n CUDAContext* context) const {\n const auto size = c10::multiply_integers(X_dims.cbegin(), X_dims.cend());\n CoshGradientCUDAKernel<<<\n CAFFE_GET_BLOCKS(size),\n 
CAFFE_CUDA_NUM_THREADS,\n 0,\n context->cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Cosh,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n CoshFunctor>);\nREGISTER_CUDA_OPERATOR(\n CoshGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n CoshGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/cosh_op.h\"\n\n#include \n#include \"caffe2/core/hip/context_gpu.h\"\n\n#include \n#include \n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void CoshGradientHIPKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * sinhf(__ldg(X + i));\n#else\n dX[i] = dY[i] * sinhf(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CoshGradientFunctor::Forward(\n const std::vector& /* dY_dims */,\n const std::vector& X_dims,\n const T* dY,\n const T* X,\n T* dX,\n HIPContext* context) const {\n const auto size = c10::multiply_integers(X_dims.cbegin(), X_dims.cend());\n hipLaunchKernelGGL(( CoshGradientHIPKernel), \n dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Cosh,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n CoshFunctor>);\nREGISTER_HIP_OPERATOR(\n CoshGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n CoshGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/cosine_embedding_criterion_op.h\"\n\nnamespace caffe2 {\nnamespace {\n\n\n__global__ void CECKernel(\n const int N, const float* S, const int* Y, const float margin,\n float* output) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin);\n }\n}\n\n__global__ void CECGradientKernel(\n const int N, const float* S, const int* Y, const float* dOutput,\n const float margin, float* dS) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast(S[i] >= margin));\n }\n}\n} // namespace\n\ntemplate <>\nbool CosineEmbeddingCriterionOp::RunOnDevice() {\n auto& S = Input(0);\n auto& Y = Input(1);\n\n CAFFE_ENFORCE(S.numel() == Y.numel(),\n \"The embedding and label should have the same size.\");\n auto* output = Output(0, S.sizes(), at::dtype());\n\n const float* Sdata = S.data();\n const int* Ydata = Y.data();\n float* output_data = output->template mutable_data();\n\n CECKernel<<>>(\n S.numel(), Sdata, Ydata, margin_, output_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool CosineEmbeddingCriterionGradientOp::RunOnDevice() {\n auto& S = Input(0);\n auto& Y = Input(1);\n auto& dOutput = Input(2);\n\n\n auto* dS = Output(0, S.sizes(), at::dtype());\n\n const float* Sdata = S.data();\n const int* Ydata = Y.data();\n const float* dOutput_data = dOutput.data();\n float* dSdata = dS->template mutable_data();\n CECGradientKernel<<>>(\n S.numel(), Sdata, Ydata, dOutput_data, margin_, dSdata);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n CosineEmbeddingCriterion,\n CosineEmbeddingCriterionOp);\nREGISTER_CUDA_OPERATOR(\n CosineEmbeddingCriterionGradient,\n CosineEmbeddingCriterionGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/cosine_embedding_criterion_op.h\"\n\nnamespace caffe2 {\nnamespace {\n\n\n__global__ void CECKernel(\n const int N, const float* S, const int* Y, const float margin,\n float* output) {\n HIP_1D_KERNEL_LOOP(i, N) {\n output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin);\n }\n}\n\n__global__ void CECGradientKernel(\n const int N, const float* S, const int* Y, const float* dOutput,\n const float margin, float* dS) {\n HIP_1D_KERNEL_LOOP(i, N) {\n dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast(S[i] >= margin));\n }\n}\n} // namespace\n\ntemplate <>\nbool CosineEmbeddingCriterionOp::RunOnDevice() {\n auto& S = Input(0);\n auto& Y = Input(1);\n\n CAFFE_ENFORCE(S.numel() == Y.numel(),\n \"The embedding and label should have the same size.\");\n auto* output = Output(0, S.sizes(), at::dtype());\n\n const float* Sdata = S.data();\n const int* Ydata = Y.data();\n float* output_data = output->template mutable_data();\n\n hipLaunchKernelGGL(( CECKernel), dim3(CAFFE_GET_BLOCKS(S.numel())), dim3(CAFFE_HIP_NUM_THREADS),\n 0, context_.hip_stream(), \n S.numel(), Sdata, Ydata, margin_, output_data);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool CosineEmbeddingCriterionGradientOp::RunOnDevice() {\n auto& S = Input(0);\n auto& Y = Input(1);\n auto& dOutput = Input(2);\n\n\n auto* dS = Output(0, S.sizes(), at::dtype());\n\n const float* Sdata = S.data();\n const int* Ydata = Y.data();\n const float* dOutput_data = dOutput.data();\n float* dSdata = dS->template mutable_data();\n hipLaunchKernelGGL(( CECGradientKernel), dim3(CAFFE_GET_BLOCKS(S.numel())), dim3(CAFFE_HIP_NUM_THREADS),\n 0, context_.hip_stream(), \n S.numel(), Sdata, Ydata, dOutput_data, margin_, dSdata);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n CosineEmbeddingCriterion,\n CosineEmbeddingCriterionOp);\nREGISTER_HIP_OPERATOR(\n CosineEmbeddingCriterionGradient,\n CosineEmbeddingCriterionGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardswish_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), \"hardswish_cuda\", [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t one_sixth(1.0f / 6.0f);\n const opmath_t three(3.0f);\n const opmath_t six(6.0f);\n gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {\n opmath_t x = static_cast(self_val);\n return x * std::min(std::max(x + three, zero), six) * one_sixth;\n });\n });\n}\n\nvoid hardswish_backward_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), \"hardswish_backward_cuda\", [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t three(3.0f);\n const opmath_t neg_three(-3.0f);\n const opmath_t one_half(0.5f);\n gpu_kernel(\n iter,\n [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {\n opmath_t grad_val = static_cast(grad_val_);\n opmath_t self_val = static_cast(self_val_);\n if (self_val < neg_three) {\n return zero;\n } 
else if (self_val <= three) {\n return grad_val * ((self_val / three) + one_half);\n } else {\n return grad_val;\n }\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);\nREGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardswish_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), \"hardswish_hip\", [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t one_sixth(1.0f / 6.0f);\n const opmath_t three(3.0f);\n const opmath_t six(6.0f);\n gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {\n opmath_t x = static_cast(self_val);\n return x * ::min(::max(x + three, zero), six) * one_sixth;\n });\n });\n}\n\nvoid hardswish_backward_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), \"hardswish_backward_hip\", [&]() {\n using opmath_t = at::opmath_type;\n const opmath_t zero(0.0f);\n const opmath_t three(3.0f);\n const opmath_t neg_three(-3.0f);\n const opmath_t one_half(0.5f);\n gpu_kernel(\n iter,\n [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {\n opmath_t grad_val = static_cast(grad_val_);\n opmath_t self_val = static_cast(self_val_);\n if (self_val < neg_three) {\n return zero;\n } else if (self_val <= three) {\n return grad_val * ((self_val / three) + one_half);\n } else {\n return grad_val;\n }\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);\nREGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\nnamespace at {\nnamespace cuda {\nnamespace cub {\n\ntemplate \nvoid radix_sort_keys(\n const key_t* keys_in,\n key_t* keys_out,\n int64_t n,\n bool descending,\n int64_t begin_bit,\n int64_t end_bit) {\n TORCH_CHECK(\n n <= std::numeric_limits::max(),\n \"cub sort does not support sorting more than INT_MAX elements\");\n using key_t_ = typename detail::cuda_type::type;\n\n const key_t_* keys_in_ = reinterpret_cast(keys_in);\n key_t_* keys_out_ = reinterpret_cast(keys_out);\n\n if (descending) {\n CUB_WRAPPER(\n NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending,\n keys_in_,\n keys_out_,\n n,\n begin_bit,\n end_bit,\n c10::cuda::getCurrentCUDAStream());\n } else {\n CUB_WRAPPER(\n NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys,\n keys_in_,\n keys_out_,\n n,\n begin_bit,\n end_bit,\n c10::cuda::getCurrentCUDAStream());\n }\n}\n\n#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \\\n template void radix_sort_keys( \\\n const scalar_t* keys_in, \\\n scalar_t* keys_out, \\\n int64_t n, \\\n bool descending, \\\n int64_t begin_bit, \\\n int64_t end_bit);\n\nAT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)\n\n} // namespace cub\n} // namespace cuda\n} // namespace at\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\nnamespace at {\nnamespace hip {\nnamespace cub {\n\ntemplate \nvoid radix_sort_keys(\n const key_t* keys_in,\n key_t* keys_out,\n int64_t n,\n bool descending,\n int64_t begin_bit,\n int64_t end_bit) {\n TORCH_CHECK(\n n <= std::numeric_limits::max(),\n \"cub sort does not support sorting more than INT_MAX elements\");\n using key_t_ = typename detail::hip_type::type;\n\n const key_t_* keys_in_ = reinterpret_cast(keys_in);\n key_t_* keys_out_ = reinterpret_cast(keys_out);\n\n if (descending) {\n CUB_WRAPPER(\n NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortKeysDescending,\n keys_in_,\n keys_out_,\n n,\n begin_bit,\n end_bit,\n c10::hip::getCurrentHIPStream());\n } else {\n CUB_WRAPPER(\n NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortKeys,\n keys_in_,\n keys_out_,\n n,\n begin_bit,\n end_bit,\n c10::hip::getCurrentHIPStream());\n }\n}\n\n#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \\\n template void radix_sort_keys( \\\n const scalar_t* keys_in, \\\n scalar_t* keys_out, \\\n int64_t n, \\\n bool descending, \\\n int64_t begin_bit, \\\n int64_t end_bit);\n\nAT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)\n\n} // namespace cub\n} // namespace hip\n} // namespace at\n###" }, { "cuda": "\n#include \"caffe2/operators/cos_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nCosGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = -__ldg(dY + i) * sin(__ldg(X + i));\n#else\n dX[i] = -dY[i] * sin(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CosGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n CosGradientCUDAKernel<<<\n CAFFE_GET_BLOCKS(size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context->cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Cos,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n CosFunctor>);\nREGISTER_CUDA_OPERATOR(\n CosGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n CosGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/cos_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nCosGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = -__ldg(dY + i) * sin(__ldg(X + i));\n#else\n dX[i] = -dY[i] * sin(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CosGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( CosGradientHIPKernel), \n dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Cos,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n CosFunctor>);\nREGISTER_HIP_OPERATOR(\n CosGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n CosGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/counter_ops.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(CreateCounter, CreateCounterOp);\nREGISTER_CUDA_OPERATOR(ResetCounter, ResetCounterOp);\nREGISTER_CUDA_OPERATOR(CountDown, CountDownOp);\nREGISTER_CUDA_OPERATOR(\n CheckCounterDone,\n CheckCounterDoneOp);\nREGISTER_CUDA_OPERATOR(CountUp, CountUpOp);\nREGISTER_CUDA_OPERATOR(RetrieveCount, RetrieveCountOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/counter_ops.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(CreateCounter, CreateCounterOp);\nREGISTER_HIP_OPERATOR(ResetCounter, ResetCounterOp);\nREGISTER_HIP_OPERATOR(CountDown, CountDownOp);\nREGISTER_HIP_OPERATOR(\n CheckCounterDone,\n CheckCounterDoneOp);\nREGISTER_HIP_OPERATOR(CountUp, CountUpOp);\nREGISTER_HIP_OPERATOR(RetrieveCount, RetrieveCountOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/cube_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nCubeGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * __ldg(X + i) * __ldg(X + i) * T(3);\n#else\n dX[i] = dY[i] * X[i] * X[i] * T(3);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CubeGradientFunctor::Forward(\n const std::vector& dY_dims,\n const std::vector& /* X_dims */,\n const T* dY,\n const T* X,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies());\n CubeGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Cube,\n UnaryElementwiseOp>);\nREGISTER_CUDA_OPERATOR(\n CubeGradient,\n BinaryElementwiseOp<\n NumericTypes,\n CUDAContext,\n CubeGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/cube_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nCubeGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * __ldg(X + i) * __ldg(X + i) * T(3);\n#else\n dX[i] = dY[i] * X[i] * X[i] * T(3);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool CubeGradientFunctor::Forward(\n const std::vector& dY_dims,\n const std::vector& /* X_dims */,\n const T* dY,\n const T* X,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( CubeGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Cube,\n UnaryElementwiseOp>);\nREGISTER_HIP_OPERATOR(\n CubeGradient,\n BinaryElementwiseOp<\n NumericTypes,\n HIPContext,\n CubeGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/data_couple.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(DataCouple, DataCoupleOp);\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/data_couple.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(DataCouple, DataCoupleOp);\n}\n###" }, { "cuda": "\n#include \"caffe2/operators/do_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(Do, DoOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/do_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(Do, DoOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/dropout_op.h\"\nnamespace caffe2 {\nnamespace {\n__global__ void DropoutKernel(\n const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) {\n const float scale = 1. / (1. 
- ratio);\n CUDA_1D_KERNEL_LOOP(i, N) {\n maskdata[i] = (Ydata[i] > ratio);\n Ydata[i] = Xdata[i] * scale * maskdata[i];\n }\n}\n} \ntemplate <>\nbool DropoutOp::RunOnDevice() {\n auto& X = Input(0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n if (is_test_) {\n if (Y != &X) {\n context_.CopySameDevice(\n X.numel(), X.data(), Y->template mutable_data());\n }\n return true;\n } else {\n \n \n \n float* Ydata = Y->template mutable_data();\n auto* mask = Output(1, X.sizes(), at::dtype());\n CAFFE_ENFORCE(X.data() != Ydata, \"In-place GPU dropout is broken\");\n CURAND_ENFORCE(\n curandGenerateUniform(context_.curand_generator(), Ydata, X.numel()));\n DropoutKernel<<<\n CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(\n X.numel(), ratio_, X.data(), Ydata, mask->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n return true;\n }\n}\nnamespace {\n__global__ void DropoutGradientKernel(\n const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n dXdata[i] = dYdata[i] * maskdata[i] * scale;\n }\n}\n} \ntemplate <>\nbool DropoutGradientOp::RunOnDevice() {\n auto& dY = Input(0);\n auto* dX = Output(0, dY.sizes(), at::dtype());\n if (is_test_) {\n if (dX != &dY) {\n context_.CopySameDevice(\n dY.numel(), dY.data(), dX->template mutable_data());\n }\n return true;\n } else {\n auto& mask = Input(1);\n CAFFE_ENFORCE_EQ(dY.numel(), mask.numel());\n const float scale = 1. / (1. - ratio_);\n DropoutGradientKernel<<<\n CAFFE_GET_BLOCKS(dY.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(\n dY.numel(), dY.data(), mask.data(), scale, dX->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n return true;\n }\n}\nREGISTER_CUDA_OPERATOR(Dropout, DropoutOp);\nREGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp);\n} \n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/dropout_op.h\"\nnamespace caffe2 {\nnamespace {\n__global__ void DropoutKernel(\n const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) {\n const float scale = 1. / (1. 
- ratio);\n HIP_1D_KERNEL_LOOP(i, N) {\n maskdata[i] = (Ydata[i] > ratio);\n Ydata[i] = Xdata[i] * scale * maskdata[i];\n }\n}\n} \ntemplate <>\nbool DropoutOp::RunOnDevice() {\n auto& X = Input(0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n if (is_test_) {\n if (Y != &X) {\n context_.CopySameDevice(\n X.numel(), X.data(), Y->template mutable_data());\n }\n return true;\n } else {\n \n \n \n float* Ydata = Y->template mutable_data();\n auto* mask = Output(1, X.sizes(), at::dtype());\n CAFFE_ENFORCE(X.data() != Ydata, \"In-place GPU dropout is broken\");\n HIPRAND_ENFORCE(\n hiprandGenerateUniform(context_.hiprand_generator(), Ydata, X.numel()));\n hipLaunchKernelGGL(( DropoutKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), X.numel(), ratio_, X.data(), Ydata, mask->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n return true;\n }\n}\nnamespace {\n__global__ void DropoutGradientKernel(\n const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) {\n HIP_1D_KERNEL_LOOP(i, N) {\n dXdata[i] = dYdata[i] * maskdata[i] * scale;\n }\n}\n} \ntemplate <>\nbool DropoutGradientOp::RunOnDevice() {\n auto& dY = Input(0);\n auto* dX = Output(0, dY.sizes(), at::dtype());\n if (is_test_) {\n if (dX != &dY) {\n context_.CopySameDevice(\n dY.numel(), dY.data(), dX->template mutable_data());\n }\n return true;\n } else {\n auto& mask = Input(1);\n CAFFE_ENFORCE_EQ(dY.numel(), mask.numel());\n const float scale = 1. / (1. - ratio_);\n hipLaunchKernelGGL(( DropoutGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), dY.numel(), dY.data(), mask.data(), scale, dX->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n return true;\n }\n}\nREGISTER_HIP_OPERATOR(Dropout, DropoutOp);\nREGISTER_HIP_OPERATOR(DropoutGrad, DropoutGradientOp);\n} ###" }, { "cuda": "\n#include \"caffe2/operators/elementwise_add_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Add,\n BinaryElementwiseOp>);\nREGISTER_CUDA_OPERATOR(\n AddGradient,\n BinaryElementwiseGradientOp<\n NumericTypes,\n CUDAContext,\n AddFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/elementwise_add_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Add,\n BinaryElementwiseOp>);\nREGISTER_HIP_OPERATOR(\n AddGradient,\n BinaryElementwiseGradientOp<\n NumericTypes,\n HIPContext,\n AddFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/elementwise_op_test.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/flags.h\"\n\nC10_DECLARE_string(caffe_test_root);\n\ntemplate <>\nvoid CopyVector(const int N, const bool* x, bool* y) {\n CUDA_CHECK(cudaMemcpy(y, x, N * sizeof(bool), cudaMemcpyHostToDevice));\n}\n\ntemplate <>\ncaffe2::OperatorDef CreateOperatorDef() {\n caffe2::OperatorDef def;\n def.mutable_device_option()->set_device_type(caffe2::PROTO_CUDA);\n return def;\n}\n\nTEST(ElementwiseGPUTest, And) {\n if (!caffe2::HasCudaGPU())\n return;\n elementwiseAnd();\n}\n\nTEST(ElementwiseGPUTest, Or) {\n if (!caffe2::HasCudaGPU())\n return;\n elementwiseOr();\n}\n\nTEST(ElementwiseGPUTest, Xor) {\n if (!caffe2::HasCudaGPU())\n return;\n elementwiseXor();\n}\n\nTEST(ElementwiseGPUTest, Not) {\n if (!caffe2::HasCudaGPU())\n return;\n elementwiseNot();\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/elementwise_op_test.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/flags.h\"\n\nC10_DECLARE_string(caffe_test_root);\n\ntemplate <>\nvoid CopyVector(const int N, const bool* x, bool* y) {\n HIP_CHECK(hipMemcpy(y, x, N * sizeof(bool), hipMemcpyHostToDevice));\n}\n\ntemplate <>\ncaffe2::OperatorDef CreateOperatorDef() {\n caffe2::OperatorDef def;\n def.mutable_device_option()->set_device_type(caffe2::PROTO_HIP);\n return def;\n}\n\nTEST(ElementwiseGPUTest, And) {\n if (!caffe2::HasHipGPU())\n return;\n elementwiseAnd();\n}\n\nTEST(ElementwiseGPUTest, Or) {\n if (!caffe2::HasHipGPU())\n return;\n elementwiseOr();\n}\n\nTEST(ElementwiseGPUTest, Xor) {\n if (!caffe2::HasHipGPU())\n return;\n elementwiseXor();\n}\n\nTEST(ElementwiseGPUTest, Not) {\n if (!caffe2::HasHipGPU())\n return;\n elementwiseNot();\n}\n###" }, { "cuda": "\n#include \"caffe2/operators/elementwise_sub_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Sub,\n BinaryElementwiseOp>);\nREGISTER_CUDA_OPERATOR(\n SubGradient,\n BinaryElementwiseGradientOp<\n NumericTypes,\n CUDAContext,\n SubFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/elementwise_sub_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Sub,\n BinaryElementwiseOp>);\nREGISTER_HIP_OPERATOR(\n SubGradient,\n BinaryElementwiseGradientOp<\n NumericTypes,\n HIPContext,\n SubFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/elu_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void EluCUDAKernel(const int N, const T alpha, const T* X, T* Y);\n\ntemplate <>\n__global__ void\nEluCUDAKernel(const int N, const float alpha, const float* X, float* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n Y[i] =\n __ldg(X + i) < 0 ? alpha * (expf(__ldg(X + i)) - 1.0f) : __ldg(X + i);\n#else\n Y[i] = X[i] < 0 ? 
alpha * (expf(X[i]) - 1.0f) : X[i];\n#endif\n }\n}\n\ntemplate \n__global__ void EluGradientCUDAKernel(\n const int N,\n const T alpha,\n const T* dY,\n const T* Y,\n T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(Y + i) < 0 ? __ldg(dY + i) * (__ldg(Y + i) + alpha)\n : __ldg(dY + i);\n#else\n dX[i] = Y[i] < 0 ? dY[i] * (Y[i] + alpha) : dY[i];\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool EluFunctor::\noperator()(const int N, const T* X, T* Y, CUDAContext* context) const {\n EluCUDAKernel\n <<cuda_stream()>>>(N, alpha, X, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool EluGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n EluGradientCUDAKernel\n <<cuda_stream()>>>(size, alpha, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Elu,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n CUDAContext,\n EluFunctor>);\nREGISTER_CUDA_OPERATOR(\n EluGradient,\n BinaryElementwiseWithArgsOp<\n TensorTypes,\n CUDAContext,\n EluGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/elu_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void EluHIPKernel(const int N, const T alpha, const T* X, T* Y);\n\ntemplate <>\n__global__ void\nEluHIPKernel(const int N, const float alpha, const float* X, float* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n Y[i] =\n __ldg(X + i) < 0 ? alpha * (expf(__ldg(X + i)) - 1.0f) : __ldg(X + i);\n#else\n Y[i] = X[i] < 0 ? alpha * (expf(X[i]) - 1.0f) : X[i];\n#endif\n }\n}\n\ntemplate \n__global__ void EluGradientHIPKernel(\n const int N,\n const T alpha,\n const T* dY,\n const T* Y,\n T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(Y + i) < 0 ? __ldg(dY + i) * (__ldg(Y + i) + alpha)\n : __ldg(dY + i);\n#else\n dX[i] = Y[i] < 0 ? 
dY[i] * (Y[i] + alpha) : dY[i];\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool EluFunctor::\noperator()(const int N, const T* X, T* Y, HIPContext* context) const {\n hipLaunchKernelGGL(( EluHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), N, alpha, X, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool EluGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( EluGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, alpha, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Elu,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n HIPContext,\n EluFunctor>);\nREGISTER_HIP_OPERATOR(\n EluGradient,\n BinaryElementwiseWithArgsOp<\n TensorTypes,\n HIPContext,\n EluGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardtanh_backward_kernel(\n TensorIterator& iter,\n const Scalar& min,\n const Scalar& max) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half, at::ScalarType::BFloat16,\n iter.dtype(), \"hardtanh_backward_cuda\", [&]() {\n using opmath_t = at::opmath_type;\n auto min_val = min.to();\n auto max_val = max.to();\n gpu_kernel(\n iter,\n [min_val, max_val] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n return (bop <= min_val) || (bop >= max_val) ? opmath_t(0) : aop;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid hardtanh_backward_kernel(\n TensorIterator& iter,\n const Scalar& min,\n const Scalar& max) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half, at::ScalarType::BFloat16,\n iter.dtype(), \"hardtanh_backward_hip\", [&]() {\n using opmath_t = at::opmath_type;\n auto min_val = min.to();\n auto max_val = max.to();\n gpu_kernel(\n iter,\n [min_val, max_val] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n return (bop <= min_val) || (bop >= max_val) ? opmath_t(0) : aop;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/operators/enforce_finite_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\ntemplate <>\ntemplate \nbool EnforceFiniteOp::DoRunWithType() {\n buffer_.CopyFrom(Input(0)); // sync copy\n EnforceOnCPU(buffer_);\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(EnforceFinite, EnforceFiniteOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/enforce_finite_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\ntemplate <>\ntemplate \nbool EnforceFiniteOp::DoRunWithType() {\n buffer_.CopyFrom(Input(0)); // sync copy\n EnforceOnCPU(buffer_);\n return true;\n}\n\nREGISTER_HIP_OPERATOR(EnforceFinite, EnforceFiniteOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/ensure_cpu_output_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n// From CUDA Context, takes either CUDA or CPU tensor as input, and produce\n// TensorCPU\nREGISTER_CUDA_OPERATOR(EnsureCPUOutput, EnsureCPUOutputOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/ensure_cpu_output_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n// From HIP Context, takes either HIP or CPU tensor as input, and produce\n// TensorCPU\nREGISTER_HIP_OPERATOR(EnsureCPUOutput, EnsureCPUOutputOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/erf_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void ErfGradientCUDAKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = 2.0f / sqrtf(PI) * expf(-powf(__ldg(X+i), 2.0f)) * __ldg(dY + i);\n#else\n dX[i] = 2.0f / sqrtf(PI) * expf(-powf(X[i], 2.0f)) * dY[i];\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ErfGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n ErfGradientCUDAKernel<<<\n CAFFE_GET_BLOCKS(size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context->cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Erf,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n ErfFunctor>);\nREGISTER_CUDA_OPERATOR(\n ErfGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n ErfGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/erf_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void ErfGradientHIPKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = 2.0f / sqrtf(PI) * expf(-powf(__ldg(X+i), 2.0f)) * __ldg(dY + i);\n#else\n dX[i] = 2.0f / sqrtf(PI) * expf(-powf(X[i], 2.0f)) * dY[i];\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ErfGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( ErfGradientHIPKernel), \n dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Erf,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n ErfFunctor>);\nREGISTER_HIP_OPERATOR(\n ErfGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n ErfGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/expand_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Expand,\n ExpandOp<\n TensorTypes,\n CUDAContext>);\nREGISTER_CUDA_OPERATOR(\n ExpandGradient,\n ExpandGradientOp<\n TensorTypes,\n CUDAContext>);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/expand_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Expand,\n ExpandOp<\n TensorTypes,\n HIPContext>);\nREGISTER_HIP_OPERATOR(\n ExpandGradient,\n ExpandGradientOp<\n TensorTypes,\n HIPContext>);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/expand_squeeze_dims_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(Squeeze, SqueezeOp);\nREGISTER_CUDA_OPERATOR(ExpandDims, ExpandDimsOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/expand_squeeze_dims_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(Squeeze, SqueezeOp);\nREGISTER_HIP_OPERATOR(ExpandDims, ExpandDimsOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/exp_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Exp,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n ExpFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/exp_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Exp,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n ExpFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/filler_op.h\"\n#include \"caffe2/operators/operator_fallback_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n__global__ void FillRangeKernel(const int n, float* data) {\n CUDA_1D_KERNEL_LOOP(index, n) {\n data[index] = index;\n }\n}\n\ntemplate \n__global__ void FillDiagonalKernel(\n const int num_diagonal_elements,\n const int64_t step_size,\n const T value,\n T* data) {\n CUDA_1D_KERNEL_LOOP(index, num_diagonal_elements) {\n data[index * step_size] = value;\n }\n}\n}\n\ntemplate <>\nbool RangeFillOp::Fill(Tensor* output) {\n int N = output->numel();\n FillRangeKernel<<<\n CAFFE_GET_BLOCKS(N),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(N, output->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool DiagonalFillOp::FillWithType(Tensor* output) {\n VerifyOutputShape(output);\n auto* data = output->template mutable_data();\n int size = output->numel();\n // first fill everything with 0\n math::Set(size, T(0), data, &context_);\n\n T value = OperatorBase::GetSingleArgument(\"value\", 0);\n int64_t step_size = GetStepSize(output);\n int num_diagonal_elements = ceil((float)size / step_size);\n\n FillDiagonalKernel<<<\n CAFFE_GET_BLOCKS(num_diagonal_elements),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(num_diagonal_elements, step_size, value, data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp);\nREGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp);\nREGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp);\nREGISTER_CUDA_OPERATOR(DiagonalFill, DiagonalFillOp);\nREGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp);\nREGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp);\nREGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp);\nREGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp);\nREGISTER_CUDA_OPERATOR(LengthsRangeFill, GPUFallbackOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/filler_op.h\"\n#include \"caffe2/operators/hip/operator_fallback_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n__global__ void FillRangeKernel(const int n, float* data) {\n HIP_1D_KERNEL_LOOP(index, n) {\n data[index] = index;\n }\n}\n\ntemplate \n__global__ void FillDiagonalKernel(\n const int num_diagonal_elements,\n const int64_t step_size,\n const T value,\n T* data) {\n HIP_1D_KERNEL_LOOP(index, num_diagonal_elements) {\n data[index * step_size] = value;\n }\n}\n}\n\ntemplate <>\nbool RangeFillOp::Fill(Tensor* output) {\n int N = output->numel();\n hipLaunchKernelGGL(( FillRangeKernel), \n dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), N, output->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool DiagonalFillOp::FillWithType(Tensor* output) {\n VerifyOutputShape(output);\n auto* data = output->template mutable_data();\n int size = output->numel();\n // first fill everything with 0\n math::Set(size, T(0), data, &context_);\n\n T value = OperatorBase::GetSingleArgument(\"value\", 0);\n int64_t step_size = GetStepSize(output);\n int num_diagonal_elements = ceil((float)size / step_size);\n\n hipLaunchKernelGGL(( FillDiagonalKernel), \n dim3(CAFFE_GET_BLOCKS(num_diagonal_elements)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), num_diagonal_elements, step_size, value, data);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(UniformFill, UniformFillOp);\nREGISTER_HIP_OPERATOR(UniformIntFill, UniformFillOp);\nREGISTER_HIP_OPERATOR(ConstantFill, ConstantFillOp);\nREGISTER_HIP_OPERATOR(DiagonalFill, DiagonalFillOp);\nREGISTER_HIP_OPERATOR(GaussianFill, GaussianFillOp);\nREGISTER_HIP_OPERATOR(XavierFill, XavierFillOp);\nREGISTER_HIP_OPERATOR(MSRAFill, MSRAFillOp);\nREGISTER_HIP_OPERATOR(RangeFill, RangeFillOp);\nREGISTER_HIP_OPERATOR(LengthsRangeFill, GPUFallbackOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/find_op.h\"\n#include \"caffe2/utils/cub_namespace.cuh\"\n\nnamespace caffe2 {\n\ntemplate \n__global__ void FindKernel(\n int num_needles,\n int idx_size,\n const T* idx,\n const T* needles,\n int* out,\n int missing_value) {\n int needle_idx = blockIdx.x; // One cuda block per needle\n T q = needles[needle_idx];\n int res = (-1);\n for (int j = threadIdx.x; j < idx_size; j += CAFFE_CUDA_NUM_THREADS) {\n if (idx[j] == q) {\n res = max(res, j);\n }\n }\n typedef cub::BlockReduce BlockReduce;\n __shared__ typename BlockReduce::TempStorage temp_storage;\n int min_res = BlockReduce(temp_storage).Reduce(res, cub::Max());\n if (threadIdx.x == 0) {\n out[needle_idx] = min_res == (-1) ? missing_value : min_res;\n }\n}\n\ntemplate <>\ntemplate \nbool FindOp::DoRunWithType() {\n auto& idx = Input(0);\n auto& needles = Input(1);\n\n auto* res_indices = Output(0, needles.sizes(), at::dtype());\n\n const T* idx_data = idx.data();\n const T* needles_data = needles.data();\n int* res_data = res_indices->template mutable_data();\n\n FindKernel<\n T><<>>(\n needles.numel(),\n idx.numel(),\n idx_data,\n needles_data,\n res_data,\n missing_value_);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Find, FindOp)\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/find_op.h\"\n#include \"caffe2/utils/cub_namespace.cuh\"\n\nnamespace caffe2 {\n\ntemplate \n__global__ void FindKernel(\n int num_needles,\n int idx_size,\n const T* idx,\n const T* needles,\n int* out,\n int missing_value) {\n int needle_idx = blockIdx.x; // One cuda block per needle\n T q = needles[needle_idx];\n int res = (-1);\n for (int j = threadIdx.x; j < idx_size; j += CAFFE_HIP_NUM_THREADS) {\n if (idx[j] == q) {\n res = max(res, j);\n }\n }\n typedef hipcub::BlockReduce BlockReduce;\n __shared__ typename BlockReduce::TempStorage temp_storage;\n int min_res = BlockReduce(temp_storage).Reduce(res, hipcub::Max());\n if (threadIdx.x == 0) {\n out[needle_idx] = min_res == (-1) ? missing_value : min_res;\n }\n}\n\ntemplate <>\ntemplate \nbool FindOp::DoRunWithType() {\n auto& idx = Input(0);\n auto& needles = Input(1);\n\n auto* res_indices = Output(0, needles.sizes(), at::dtype());\n\n const T* idx_data = idx.data();\n const T* needles_data = needles.data();\n int* res_data = res_indices->template mutable_data();\n\n hipLaunchKernelGGL(( FindKernel<\n T>), dim3(needles.numel()), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), \n needles.numel(),\n idx.numel(),\n idx_data,\n needles_data,\n res_data,\n missing_value_);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Find, FindOp)\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/floor_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate \n__global__ void FloorKernel(const int N, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = std::floor(X[i]);\n }\n}\n\ntemplate <>\nbool FloorOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n FloorKernel<<<\n CAFFE_GET_BLOCKS(X.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n X.numel(), X.data(), Y->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Floor, FloorOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/floor_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate \n__global__ void FloorKernel(const int N, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = ::floor(X[i]);\n }\n}\n\ntemplate <>\nbool FloorOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( FloorKernel), \n dim3(CAFFE_GET_BLOCKS(X.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n X.numel(), X.data(), Y->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Floor, FloorOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/free_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(Free, FreeOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/free_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(Free, FreeOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"leaky_relu_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negval = negval_.to();\n gpu_kernel(iter, [negval] GPU_LAMBDA(scalar_t a) -> scalar_t {\n opmath_t aop = static_cast(a);\n return aop > opmath_t(0) ? aop : aop * negval;\n });\n });\n}\n\nvoid leaky_relu_backward_kernel(\n TensorIteratorBase& iter,\n const Scalar& negval_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"leaky_relu_backward_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negval = negval_.to();\n gpu_kernel(\n iter, [negval] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n return aop > opmath_t(0) ? bop : bop * negval;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);\nREGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"leaky_relu_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negval = negval_.to();\n gpu_kernel(iter, [negval] GPU_LAMBDA(scalar_t a) -> scalar_t {\n opmath_t aop = static_cast(a);\n return aop > opmath_t(0) ? aop : aop * negval;\n });\n });\n}\n\nvoid leaky_relu_backward_kernel(\n TensorIteratorBase& iter,\n const Scalar& negval_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"leaky_relu_backward_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto negval = negval_.to();\n gpu_kernel(\n iter, [negval] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n return aop > opmath_t(0) ? 
bop : bop * negval;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);\nREGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/gather_op.h\"\n#include \"caffe2/operators/gather_op.cuh\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool GatherOp::RunOnDevice() {\n return DispatchHelper>::call(\n this, OperatorBase::Input(INDICES, CUDA));\n}\n\ntemplate <>\ntemplate \nbool GatherOp::DoRunWithType() {\n // Use shared implementation with BatchGather\n return gather_helper::gather_impl_cuda(\n this, DATA, INDICES, 0, axis_, wrap_indices_, match_outer_);\n}\n\nREGISTER_CUDA_OPERATOR(Gather, GatherOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/gather_op.h\"\n#include \"caffe2/operators/hip/gather_op.cuh\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool GatherOp::RunOnDevice() {\n return DispatchHelper>::call(\n this, OperatorBase::Input(INDICES, HIP));\n}\n\ntemplate <>\ntemplate \nbool GatherOp::DoRunWithType() {\n // Use shared implementation with BatchGather\n return gather_helper::gather_impl_hip(\n this, DATA, INDICES, 0, axis_, wrap_indices_, match_outer_);\n}\n\nREGISTER_HIP_OPERATOR(Gather, GatherOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#ifndef CAFFE2_OPERATORS_UTILS_NMS_GPU_H_\n#define CAFFE2_OPERATORS_UTILS_NMS_GPU_H_\n\n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\nnamespace utils {\n\n// Computes Non-Maximum Suppression on the GPU\n// Reject a bounding box if its region has an intersection-overunion (IoU)\n// overlap with a higher scoring selected bounding box larger than a\n// threshold.\n//\n// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes\n// size: (N,4), format: [x1; y1; x2; y2]\n// the boxes are sorted by scores in descending order\n// N : number of boxes\n// d_keep_sorted_list : row indices of the selected proposals, sorted by score\n// h_nkeep : number of selected proposals\n// dev_delete_mask, host_delete_mask : Tensors that will be used as temp storage\n// by NMS\n// Those tensors will be resized to the necessary size\n// context : current CUDA context\nTORCH_API void nms_gpu_upright(\n const float* d_desc_sorted_boxes,\n const int N,\n const float thresh,\n const bool legacy_plus_one,\n int* d_keep_sorted_list,\n int* h_nkeep,\n TensorCUDA& dev_delete_mask,\n TensorCPU& host_delete_mask,\n CUDAContext* context);\n\nstruct RotatedBox {\n float x_ctr, y_ctr, w, h, a;\n};\n\n// Same as nms_gpu_upright, but for rotated boxes with angle info.\n// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes\n// size: (N,5), format: [x_ct; y_ctr; width; height; angle]\n// the boxes are sorted by scores in descending order\nTORCH_API void nms_gpu_rotated(\n const float* d_desc_sorted_boxes,\n const int N,\n const float thresh,\n int* d_keep_sorted_list,\n int* h_nkeep,\n TensorCUDA& dev_delete_mask,\n TensorCPU& host_delete_mask,\n CUDAContext* context);\n\nTORCH_API void nms_gpu(\n const float* d_desc_sorted_boxes,\n const int N,\n const float thresh,\n const bool legacy_plus_one,\n int* d_keep_sorted_list,\n int* h_nkeep,\n TensorCUDA& dev_delete_mask,\n TensorCPU& host_delete_mask,\n CUDAContext* context,\n const int box_dim);\n\n} // namespace utils\n} // namespace caffe2\n\n#endif // 
CAFFE2_OPERATORS_UTILS_NMS_GPU_H_\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#ifndef CAFFE2_OPERATORS_UTILS_NMS_GPU_H_\n#define CAFFE2_OPERATORS_UTILS_NMS_GPU_H_\n\n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\nnamespace utils {\n\n// Computes Non-Maximum Suppression on the GPU\n// Reject a bounding box if its region has an intersection-overunion (IoU)\n// overlap with a higher scoring selected bounding box larger than a\n// threshold.\n//\n// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes\n// size: (N,4), format: [x1; y1; x2; y2]\n// the boxes are sorted by scores in descending order\n// N : number of boxes\n// d_keep_sorted_list : row indices of the selected proposals, sorted by score\n// h_nkeep : number of selected proposals\n// dev_delete_mask, host_delete_mask : Tensors that will be used as temp storage\n// by NMS\n// Those tensors will be resized to the necessary size\n// context : current HIP context\nTORCH_API void nms_gpu_upright(\n const float* d_desc_sorted_boxes,\n const int N,\n const float thresh,\n const bool legacy_plus_one,\n int* d_keep_sorted_list,\n int* h_nkeep,\n TensorHIP& dev_delete_mask,\n TensorCPU& host_delete_mask,\n HIPContext* context);\n\nstruct RotatedBox {\n float x_ctr, y_ctr, w, h, a;\n};\n\n// Same as nms_gpu_upright, but for rotated boxes with angle info.\n// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes\n// size: (N,5), format: [x_ct; y_ctr; width; height; angle]\n// the boxes are sorted by scores in descending order\nTORCH_API void nms_gpu_rotated(\n const float* d_desc_sorted_boxes,\n const int N,\n const float thresh,\n int* d_keep_sorted_list,\n int* h_nkeep,\n TensorHIP& dev_delete_mask,\n TensorCPU& host_delete_mask,\n HIPContext* context);\n\nTORCH_API void nms_gpu(\n const float* d_desc_sorted_boxes,\n const int N,\n const float thresh,\n const bool legacy_plus_one,\n int* d_keep_sorted_list,\n int* h_nkeep,\n TensorHIP& dev_delete_mask,\n TensorCPU& host_delete_mask,\n HIPContext* context,\n const int box_dim);\n\n} // namespace utils\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_UTILS_NMS_GPU_H_\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/given_tensor_byte_string_to_uint8_fill_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n GivenTensorByteStringToUInt8Fill,\n GivenTensorByteStringToUInt8FillOp);\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/given_tensor_byte_string_to_uint8_fill_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n GivenTensorByteStringToUInt8Fill,\n GivenTensorByteStringToUInt8FillOp);\n}\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/given_tensor_fill_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(GivenTensorFill, GivenTensorFillOp);\nREGISTER_CUDA_OPERATOR(\n GivenTensorDoubleFill,\n GivenTensorFillOp);\nREGISTER_CUDA_OPERATOR(\n GivenTensorInt16Fill,\n GivenTensorFillOp);\nREGISTER_CUDA_OPERATOR(GivenTensorIntFill, GivenTensorFillOp);\nREGISTER_CUDA_OPERATOR(\n GivenTensorInt64Fill,\n GivenTensorFillOp);\nREGISTER_CUDA_OPERATOR(\n GivenTensorBoolFill,\n GivenTensorFillOp);\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/given_tensor_fill_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(GivenTensorFill, GivenTensorFillOp);\nREGISTER_HIP_OPERATOR(\n GivenTensorDoubleFill,\n GivenTensorFillOp);\nREGISTER_HIP_OPERATOR(\n GivenTensorInt16Fill,\n GivenTensorFillOp);\nREGISTER_HIP_OPERATOR(GivenTensorIntFill, GivenTensorFillOp);\nREGISTER_HIP_OPERATOR(\n GivenTensorInt64Fill,\n GivenTensorFillOp);\nREGISTER_HIP_OPERATOR(\n GivenTensorBoolFill,\n GivenTensorFillOp);\n}\n###" }, { "cuda": "\n#include \"caffe2/operators/glu_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n__global__ void glu_kernel(\n const int M,\n const int split_dim_size,\n const int N,\n const float* Xdata,\n float* Ydata) {\n const int xOffset = 2 * split_dim_size * N;\n const int yOffset = split_dim_size * N;\n CUDA_1D_KERNEL_LOOP(index, M * split_dim_size * N) {\n const int i = index / split_dim_size / N;\n const int j = index / N % split_dim_size;\n const int k = index % N;\n const float x1 = Xdata[i * xOffset + j * N + k];\n const float x2 = Xdata[i * xOffset + (j + split_dim_size) * N + k];\n Ydata[i * yOffset + j * N + k] = x1 * (1. / (1. + exp(-x2)));\n }\n}\n} // namespace\n\ntemplate <>\nvoid GluOp::ComputeGlu(\n const int M,\n const int split_dim_size,\n const int N,\n const float* x_data,\n float* y_data) {\n glu_kernel<<<\n CAFFE_GET_BLOCKS(M * N * split_dim_size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(M, split_dim_size, N, x_data, y_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_CUDA_OPERATOR(Glu, GluOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/glu_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n__global__ void glu_kernel(\n const int M,\n const int split_dim_size,\n const int N,\n const float* Xdata,\n float* Ydata) {\n const int xOffset = 2 * split_dim_size * N;\n const int yOffset = split_dim_size * N;\n HIP_1D_KERNEL_LOOP(index, M * split_dim_size * N) {\n const int i = index / split_dim_size / N;\n const int j = index / N % split_dim_size;\n const int k = index % N;\n const float x1 = Xdata[i * xOffset + j * N + k];\n const float x2 = Xdata[i * xOffset + (j + split_dim_size) * N + k];\n Ydata[i * yOffset + j * N + k] = x1 * (1. / (1. 
+ exp(-x2)));\n }\n}\n} // namespace\n\ntemplate <>\nvoid GluOp::ComputeGlu(\n const int M,\n const int split_dim_size,\n const int N,\n const float* x_data,\n float* y_data) {\n hipLaunchKernelGGL(( glu_kernel), \n dim3(CAFFE_GET_BLOCKS(M * N * split_dim_size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), M, split_dim_size, N, x_data, y_data);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_HIP_OPERATOR(Glu, GluOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/half_float_ops.h\"\n#include \"caffe2/core/context_gpu.h\"\n#ifdef CAFFE_HAS_CUDA_FP16\nnamespace caffe2 {\nnamespace {\n__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = __float2half(X[i]);\n }\n}\n__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = __half2float(X[i]);\n }\n}\n}\ntemplate <>\nbool FloatToHalfOp::RunOnDevice() {\n auto& X = Input(0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n FloatToHalfKernel<<<\n CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(\n X.numel(), X.data(), reinterpret_cast(Y->template mutable_data()));\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n return true;\n}\ntemplate <>\nbool HalfToFloatOp::RunOnDevice() {\n auto& X = Input(0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n HalfToFloatKernel<<<\n CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(\n X.numel(), reinterpret_cast(X.data()), Y->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n return true;\n}\ntemplate <>\nbool Float16UniformFillOp::RunOnDevice() {\n auto* output = Output(0, shape_, at::dtype());\n at::Half* out = output->template mutable_data();\n auto leading_dim_sz = output->size(0);\n CAFFE_ENFORCE_GT(leading_dim_sz, 0, \"The input shape should have the first dimension greater than 0\");\n int rowsz = output->numel() / output->size(0);\n ReinitializeTensor(\n &temp_data_buffer_, {rowsz}, at::dtype().device(CUDA));\n float* temp_data = temp_data_buffer_.template mutable_data();\n for (uint64_t i = 0; i < leading_dim_sz; i++) {\n math::RandUniform(\n rowsz, min_, max_, temp_data, &context_);\n FloatToHalfKernel<<<\n CAFFE_GET_BLOCKS(rowsz), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(\n rowsz, temp_data, reinterpret_cast(out + i * rowsz));\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n }\n return true;\n}\nREGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfOp);\nREGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatOp);\nREGISTER_CUDA_OPERATOR(Float16UniformFill, Float16UniformFillOp);\n} \n#endif \n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/half_float_ops.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#ifdef CAFFE_HAS_HIP_FP16\nnamespace caffe2 {\nnamespace {\n__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = __float2half(X[i]);\n }\n}\n__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = __half2float(X[i]);\n }\n}\n}\ntemplate <>\nbool FloatToHalfOp::RunOnDevice() {\n auto& X = Input(0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( FloatToHalfKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), X.numel(), X.data(), reinterpret_cast(Y->template mutable_data()));\n C10_HIP_KERNEL_LAUNCH_CHECK();\n return true;\n}\ntemplate <>\nbool HalfToFloatOp::RunOnDevice() {\n auto& X = 
Input(0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( HalfToFloatKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), X.numel(), reinterpret_cast(X.data()), Y->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n return true;\n}\ntemplate <>\nbool Float16UniformFillOp::RunOnDevice() {\n auto* output = Output(0, shape_, at::dtype());\n at::Half* out = output->template mutable_data();\n auto leading_dim_sz = output->size(0);\n CAFFE_ENFORCE_GT(leading_dim_sz, 0, \"The input shape should have the first dimension greater than 0\");\n int rowsz = output->numel() / output->size(0);\n ReinitializeTensor(\n &temp_data_buffer_, {rowsz}, at::dtype().device(HIP));\n float* temp_data = temp_data_buffer_.template mutable_data();\n for (uint64_t i = 0; i < leading_dim_sz; i++) {\n math::RandUniform(\n rowsz, min_, max_, temp_data, &context_);\n hipLaunchKernelGGL(( FloatToHalfKernel), dim3(CAFFE_GET_BLOCKS(rowsz)), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), rowsz, temp_data, reinterpret_cast(out + i * rowsz));\n C10_HIP_KERNEL_LAUNCH_CHECK();\n }\n return true;\n}\nREGISTER_HIP_OPERATOR(FloatToHalf, FloatToHalfOp);\nREGISTER_HIP_OPERATOR(HalfToFloat, HalfToFloatOp);\nREGISTER_HIP_OPERATOR(Float16UniformFill, Float16UniformFillOp);\n} \n#endif ###" }, { "cuda": "\n#include \"caffe2/operators/hard_sigmoid_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void HardSigmoidCUDAKernel(\n const int N,\n const T alpha,\n const T beta,\n const T* X,\n T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n Y[i] = max(T(0), min(T(1), alpha * __ldg(X + i) + beta));\n#else\n Y[i] = max(T(0), min(T(1), alpha * X[i] + beta));\n#endif\n }\n}\n\ntemplate \n__global__ void HardSigmoidGradientCUDAKernel(\n const int N,\n const T alpha,\n const T* dY,\n const T* Y,\n T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = (__ldg(Y + i) > T(0) && __ldg(Y + i) < T(1)) ? __ldg(dY + i) * alpha\n : T(0);\n#else\n dX[i] = (Y[i] > T(0) && Y[i] < T(1)) ? dY[i] * alpha : T(0);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool HardSigmoidFunctor::\noperator()(const int N, const T* X, T* Y, CUDAContext* context) const {\n HardSigmoidCUDAKernel\n <<cuda_stream()>>>(N, alpha, beta, X, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool HardSigmoidGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n HardSigmoidGradientCUDAKernel\n <<cuda_stream()>>>(size, alpha, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n HardSigmoid,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n CUDAContext,\n HardSigmoidFunctor>);\nREGISTER_CUDA_OPERATOR(\n HardSigmoidGradient,\n BinaryElementwiseWithArgsOp<\n TensorTypes,\n CUDAContext,\n HardSigmoidGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/hard_sigmoid_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void HardSigmoidHIPKernel(\n const int N,\n const T alpha,\n const T beta,\n const T* X,\n T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n Y[i] = max(T(0), min(T(1), alpha * __ldg(X + i) + beta));\n#else\n Y[i] = max(T(0), min(T(1), alpha * X[i] + beta));\n#endif\n }\n}\n\ntemplate \n__global__ void HardSigmoidGradientHIPKernel(\n const int N,\n const T alpha,\n const T* dY,\n const T* Y,\n T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = (__ldg(Y + i) > T(0) && __ldg(Y + i) < T(1)) ? __ldg(dY + i) * alpha\n : T(0);\n#else\n dX[i] = (Y[i] > T(0) && Y[i] < T(1)) ? dY[i] * alpha : T(0);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool HardSigmoidFunctor::\noperator()(const int N, const T* X, T* Y, HIPContext* context) const {\n hipLaunchKernelGGL(( HardSigmoidHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), N, alpha, beta, X, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool HardSigmoidGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( HardSigmoidGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, alpha, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n HardSigmoid,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n HIPContext,\n HardSigmoidFunctor>);\nREGISTER_HIP_OPERATOR(\n HardSigmoidGradient,\n BinaryElementwiseWithArgsOp<\n TensorTypes,\n HIPContext,\n HardSigmoidGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/if_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(If, IfOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/if_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(If, IfOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/im2col_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(Im2Col, Im2ColOp);\nREGISTER_CUDA_OPERATOR(Col2Im, Col2ImOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/im2col_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(Im2Col, Im2ColOp);\nREGISTER_HIP_OPERATOR(Col2Im, Col2ImOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/leaky_relu_op.h\"\n\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void LeakyReluKernel(const int N, const T alpha, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = X[i] >= 0 ? 
X[i] : X[i] * alpha;\n }\n}\n\ntemplate \n__global__ void LeakyReluGradientKernel(\n const int N,\n const T alpha,\n const T* Y,\n const T* dY,\n T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n dX[i] = Y[i] >= 0 ? dY[i] : dY[i] * alpha;\n }\n}\n} // namespace\n\ntemplate <>\nbool LeakyReluOp::RunOnDevice() {\n const auto& X = Input(0);\n CAFFE_ENFORCE_GT(X.numel(), 0);\n\n auto* Y = Output(0, X.sizes(), at::dtype());\n LeakyReluKernel<<<\n CAFFE_GET_BLOCKS(X.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n X.numel(), alpha_, X.data(), Y->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool LeakyReluGradientOp::RunOnDevice() {\n const auto& Y = Input(0);\n const auto& dY = Input(1);\n\n auto* dX = Output(0, Y.sizes(), at::dtype());\n CAFFE_ENFORCE_EQ(Y.numel(), dY.numel());\n LeakyReluGradientKernel<<<\n CAFFE_GET_BLOCKS(Y.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n Y.numel(),\n alpha_,\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(LeakyRelu, LeakyReluOp);\nREGISTER_CUDA_OPERATOR(\n LeakyReluGradient,\n LeakyReluGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/leaky_relu_op.h\"\n\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void LeakyReluKernel(const int N, const T alpha, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = X[i] >= 0 ? X[i] : X[i] * alpha;\n }\n}\n\ntemplate \n__global__ void LeakyReluGradientKernel(\n const int N,\n const T alpha,\n const T* Y,\n const T* dY,\n T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n dX[i] = Y[i] >= 0 ? 
dY[i] : dY[i] * alpha;\n }\n}\n} // namespace\n\ntemplate <>\nbool LeakyReluOp::RunOnDevice() {\n const auto& X = Input(0);\n CAFFE_ENFORCE_GT(X.numel(), 0);\n\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( LeakyReluKernel), \n dim3(CAFFE_GET_BLOCKS(X.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n X.numel(), alpha_, X.data(), Y->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool LeakyReluGradientOp::RunOnDevice() {\n const auto& Y = Input(0);\n const auto& dY = Input(1);\n\n auto* dX = Output(0, Y.sizes(), at::dtype());\n CAFFE_ENFORCE_EQ(Y.numel(), dY.numel());\n hipLaunchKernelGGL(( LeakyReluGradientKernel), \n dim3(CAFFE_GET_BLOCKS(Y.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n Y.numel(),\n alpha_,\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(LeakyRelu, LeakyReluOp);\nREGISTER_HIP_OPERATOR(\n LeakyReluGradient,\n LeakyReluGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n// -----------------------------------\n// log_sigmoid forward\n// -----------------------------------\n\nvoid launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, iter.common_dtype(), \"log_sigmoid_forward_cuda\", [&] {\n using opmath_t = at::opmath_type;\n\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t in_) -> scalar_t {\n const opmath_t in = in_;\n const auto min = std::min(opmath_t(0), in);\n const auto z = std::exp(-std::abs(in));\n return min - std::log1p(z);\n });\n });\n}\n\nnamespace {\n// -----------------------------------\n// log_sigmoid backward\n// -----------------------------------\nvoid log_sigmoid_backward_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, iter.common_dtype(), \"log_sigmoid_backward_cuda\", [&] {\n using opmath_t = at::opmath_type;\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t in_, scalar_t grad_out_) -> scalar_t {\n const opmath_t in = in_;\n const opmath_t grad_out = grad_out_;\n\n auto in_negative = in < opmath_t(0);\n auto max_deriv = in_negative ? opmath_t(1) : opmath_t(0);\n auto sign = in_negative ? opmath_t(1) : -opmath_t(1);\n const auto z = std::exp(-std::abs(in));\n return grad_out * (max_deriv - sign * (z / (opmath_t(1) + z)));\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n// -----------------------------------\n// log_sigmoid forward\n// -----------------------------------\n\nvoid launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, iter.common_dtype(), \"log_sigmoid_forward_hip\", [&] {\n using opmath_t = at::opmath_type;\n\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t in_) -> scalar_t {\n const opmath_t in = in_;\n const auto min = ::min(opmath_t(0), in);\n const auto z = ::exp(-std::abs(in));\n return min - std::log1p(z);\n });\n });\n}\n\nnamespace {\n// -----------------------------------\n// log_sigmoid backward\n// -----------------------------------\nvoid log_sigmoid_backward_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, iter.common_dtype(), \"log_sigmoid_backward_hip\", [&] {\n using opmath_t = at::opmath_type;\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t in_, scalar_t grad_out_) -> scalar_t {\n const opmath_t in = in_;\n const opmath_t grad_out = grad_out_;\n\n auto in_negative = in < opmath_t(0);\n auto max_deriv = in_negative ? opmath_t(1) : opmath_t(0);\n auto sign = in_negative ? opmath_t(1) : -opmath_t(1);\n const auto z = ::exp(-std::abs(in));\n return grad_out * (max_deriv - sign * (z / (opmath_t(1) + z)));\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/operators/lengths_pad_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(LengthsPad, LengthsPadOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/lengths_pad_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(LengthsPad, LengthsPadOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/load_save_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nvoid LoadOp::SetCurrentDevice(BlobProto* proto) {\n if (proto->has_tensor()) {\n proto->mutable_tensor()->clear_device_detail();\n auto* device_detail = proto->mutable_tensor()->mutable_device_detail();\n device_detail->set_device_type(PROTO_CUDA);\n device_detail->set_device_id(CaffeCudaGetDevice());\n }\n}\n\nREGISTER_CUDA_OPERATOR(Load, LoadOp);\nREGISTER_CUDA_OPERATOR(Save, SaveOp);\nREGISTER_CUDA_OPERATOR(Checkpoint, CheckpointOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/load_save_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nvoid LoadOp::SetCurrentDevice(BlobProto* proto) {\n if (proto->has_tensor()) {\n proto->mutable_tensor()->clear_device_detail();\n auto* device_detail = proto->mutable_tensor()->mutable_device_detail();\n device_detail->set_device_type(PROTO_HIP);\n device_detail->set_device_id(CaffeHipGetDevice());\n }\n}\n\nREGISTER_HIP_OPERATOR(Load, LoadOp);\nREGISTER_HIP_OPERATOR(Save, SaveOp);\nREGISTER_HIP_OPERATOR(Checkpoint, CheckpointOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/locally_connected_op.h\"\n#include \"caffe2/operators/locally_connected_op_impl.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(LC, LocallyConnectedOp);\nREGISTER_CUDA_OPERATOR(\n LCGradient,\n LocallyConnectedGradientOp);\n\nREGISTER_CUDA_OPERATOR(LC1D, LocallyConnectedOp);\nREGISTER_CUDA_OPERATOR(\n LC1DGradient,\n LocallyConnectedGradientOp);\n\nREGISTER_CUDA_OPERATOR(LC2D, LocallyConnectedOp);\nREGISTER_CUDA_OPERATOR(\n LC2DGradient,\n LocallyConnectedGradientOp);\n\nREGISTER_CUDA_OPERATOR(LC3D, LocallyConnectedOp);\nREGISTER_CUDA_OPERATOR(\n LC3DGradient,\n LocallyConnectedGradientOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/locally_connected_op.h\"\n#include \"caffe2/operators/locally_connected_op_impl.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(LC, LocallyConnectedOp);\nREGISTER_HIP_OPERATOR(\n LCGradient,\n LocallyConnectedGradientOp);\n\nREGISTER_HIP_OPERATOR(LC1D, LocallyConnectedOp);\nREGISTER_HIP_OPERATOR(\n LC1DGradient,\n LocallyConnectedGradientOp);\n\nREGISTER_HIP_OPERATOR(LC2D, LocallyConnectedOp);\nREGISTER_HIP_OPERATOR(\n LC2DGradient,\n LocallyConnectedGradientOp);\n\nREGISTER_HIP_OPERATOR(LC3D, LocallyConnectedOp);\nREGISTER_HIP_OPERATOR(\n LC3DGradient,\n LocallyConnectedGradientOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/log1p_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nLog1pGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / (__ldg(X + i) + T(1));\n#else\n dX[i] = dY[i] / (X[i] + T(1));\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool Log1pGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n Log1pGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Log1p,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n Log1pFunctor>);\nREGISTER_CUDA_OPERATOR(\n Log1pGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n Log1pGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/log1p_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nLog1pGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / (__ldg(X + i) + T(1));\n#else\n dX[i] = dY[i] / (X[i] + T(1));\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool Log1pGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( Log1pGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Log1p,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n Log1pFunctor>);\nREGISTER_HIP_OPERATOR(\n Log1pGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n Log1pGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/logit_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = fminf(X[i], (T(1) - eps));\n Y[i] = fmaxf(Y[i], eps);\n Y[i] = logf(Y[i] / (T(1) - Y[i]));\n }\n}\n\ntemplate \n__global__ void LogitGradientKernel(\n const int N,\n const T* X,\n const T* dY,\n const float eps,\n T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n dX[i] = (X[i] < eps || X[i] > T(1) - eps) ? T(0)\n : (dY[i] / X[i] / (T(1) - X[i]));\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool LogitFunctor::\noperator()(const int N, const T* X, T* Y, CUDAContext* context) const {\n LogitKernel\n <<cuda_stream()>>>(N, X, eps_, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool LogitGradientOp::RunOnDevice() {\n auto& X = Input(0);\n auto& dY = Input(1);\n auto* dX = Output(0);\n dX->ResizeLike(X);\n int n = X.size();\n LogitGradientKernel<<<\n CAFFE_GET_BLOCKS(n),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n n,\n X.data(),\n dY.data(),\n eps_,\n dX->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Logit,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n CUDAContext,\n LogitFunctor>);\nREGISTER_CUDA_OPERATOR(LogitGradient, LogitGradientOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/logit_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = fminf(X[i], (T(1) - eps));\n Y[i] = fmaxf(Y[i], eps);\n Y[i] = logf(Y[i] / (T(1) - Y[i]));\n }\n}\n\ntemplate \n__global__ void LogitGradientKernel(\n const int N,\n const T* X,\n const T* dY,\n const float eps,\n T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n dX[i] = (X[i] < eps || X[i] > T(1) - eps) ? 
T(0)\n : (dY[i] / X[i] / (T(1) - X[i]));\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool LogitFunctor::\noperator()(const int N, const T* X, T* Y, HIPContext* context) const {\n hipLaunchKernelGGL(( LogitKernel)\n , dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), N, X, eps_, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool LogitGradientOp::RunOnDevice() {\n auto& X = Input(0);\n auto& dY = Input(1);\n auto* dX = Output(0);\n dX->ResizeLike(X);\n int n = X.size();\n hipLaunchKernelGGL(( LogitGradientKernel), \n dim3(CAFFE_GET_BLOCKS(n)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n n,\n X.data(),\n dY.data(),\n eps_,\n dX->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Logit,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n HIPContext,\n LogitFunctor>);\nREGISTER_HIP_OPERATOR(LogitGradient, LogitGradientOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/log_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Log,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n LogFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/log_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Log,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n LogFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/loss_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(AveragedLoss, AveragedLoss);\nREGISTER_CUDA_OPERATOR(\n AveragedLossGradient,\n AveragedLossGradient);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/loss_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(AveragedLoss, AveragedLoss);\nREGISTER_HIP_OPERATOR(\n AveragedLossGradient,\n AveragedLossGradient);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/lpnorm_op.h\"\n#include \"caffe2/operators/operator_fallback_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(LpNorm, GPUFallbackOp);\nREGISTER_CUDA_OPERATOR(LpNormGradient, GPUFallbackOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/lpnorm_op.h\"\n#include \"caffe2/operators/hip/operator_fallback_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(LpNorm, GPUFallbackOp);\nREGISTER_HIP_OPERATOR(LpNormGradient, GPUFallbackOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/matmul_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(MatMul, MatMulOp);\n\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/matmul_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(MatMul, MatMulOp);\n\n}\n###" }, { "cuda": "\n#pragma once\n\n#include \n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/operators/pool_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nclass MaxPoolWithIndexOp final : public ConvPoolOpBase {\n public:\n USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);\n MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)\n : ConvPoolOpBase(operator_def, ws) {}\n ~MaxPoolWithIndexOp() {}\n\n template \n bool DoRunWithType();\n\n bool RunOnDevice() override;\n\n // Input: X\n // Output: Y, mask\n};\n\nclass MaxPoolWithIndexGradientOp final : public ConvPoolOpBase {\n public:\n USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);\n MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)\n : ConvPoolOpBase(operator_def, ws) {}\n ~MaxPoolWithIndexGradientOp() {}\n\n template \n bool DoRunWithType();\n\n bool RunOnDevice() override;\n\n // Input: X, dY, mask\n // Output: dX\n};\n\n}; // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n\n#include \n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/operators/pool_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nclass MaxPoolWithIndexOp final : public ConvPoolOpBase {\n public:\n USE_CONV_POOL_BASE_FUNCTIONS(HIPContext);\n MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)\n : ConvPoolOpBase(operator_def, ws) {}\n ~MaxPoolWithIndexOp() {}\n\n template \n bool DoRunWithType();\n\n bool RunOnDevice() override;\n\n // Input: X\n // Output: Y, mask\n};\n\nclass MaxPoolWithIndexGradientOp final : public ConvPoolOpBase {\n public:\n USE_CONV_POOL_BASE_FUNCTIONS(HIPContext);\n MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)\n : ConvPoolOpBase(operator_def, ws) {}\n ~MaxPoolWithIndexGradientOp() {}\n\n template \n bool DoRunWithType();\n\n bool RunOnDevice() override;\n\n // Input: X, dY, mask\n // Output: dX\n};\n\n}; // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid mish_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"mish_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t x_acc = static_cast(x);\n return x_acc *\n c10::cuda::compat::tanh(\n c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));\n });\n });\n}\n\nvoid mish_backward_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"mish_backward_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t dy_acc = static_cast(dy);\n 
const opmath_t x_acc = static_cast(x);\n const opmath_t s_acc =\n opmath_t(1) / (opmath_t(1) + c10::cuda::compat::exp(-x_acc));\n const opmath_t t_acc = c10::cuda::compat::tanh(\n c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));\n return dy_acc *\n (t_acc + x_acc * s_acc * (opmath_t(1) - t_acc * t_acc));\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(mish_stub, &mish_kernel);\nREGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid mish_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"mish_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t x_acc = static_cast(x);\n return x_acc *\n c10::hip::compat::tanh(\n c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));\n });\n });\n}\n\nvoid mish_backward_kernel(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"mish_backward_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t dy_acc = static_cast(dy);\n const opmath_t x_acc = static_cast(x);\n const opmath_t s_acc =\n opmath_t(1) / (opmath_t(1) + c10::hip::compat::exp(-x_acc));\n const opmath_t t_acc = c10::hip::compat::tanh(\n c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));\n return dy_acc *\n (t_acc + x_acc * s_acc * (opmath_t(1) - t_acc * t_acc));\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(mish_stub, &mish_kernel);\nREGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/mean_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(Mean, MeanOp);\nREGISTER_CUDA_OPERATOR(MeanGradient, MeanGradientOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/mean_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(Mean, MeanOp);\nREGISTER_HIP_OPERATOR(MeanGradient, MeanGradientOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\nnamespace {\n\nclass GetGPUMemoryUsageOp final : public Operator {\n public:\n template explicit GetGPUMemoryUsageOp(Args&&... args)\n : Operator(std::forward(args)...) 
{}\n ~GetGPUMemoryUsageOp() override {}\n\n bool RunOnDevice() override {\n TORCH_CHECK_EQ(InputSize(), 0);\n TORCH_CHECK_EQ(OutputSize(), 1);\n std::vector total_by_gpu = CUDAContext::TotalMemoryByGpu();\n std::vector max_by_gpu = CUDAContext::MaxMemoryByGpu();\n TORCH_CHECK_EQ(total_by_gpu.size(), max_by_gpu.size());\n\n\n auto* stats = Output(0, {2, static_cast(total_by_gpu.size())}, at::dtype());\n context_.CopyFromCPU(\n total_by_gpu.size(),\n total_by_gpu.data(),\n stats->template mutable_data());\n context_.CopyFromCPU(\n max_by_gpu.size(),\n max_by_gpu.data(),\n stats->template mutable_data() + total_by_gpu.size());\n return true;\n }\n};\n\nOPERATOR_SCHEMA(GetGPUMemoryUsage)\n .NumInputs(0)\n .NumOutputs(1)\n .SetDoc(R\"DOC(Fetches GPU memory stats from CUDAContext. Result is stored\n in output blob with shape (2, num_gpus). First row contains the total\n current memory usage, and the second row the maximum usage during\n this execution.\n\n NOTE: --caffe2_gpu_memory_tracking flag must be enabled to use this op.\n )DOC\");\n\nREGISTER_CUDA_OPERATOR(GetGPUMemoryUsage, GetGPUMemoryUsageOp);\n}\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\nnamespace {\n\nclass GetGPUMemoryUsageOp final : public Operator {\n public:\n template explicit GetGPUMemoryUsageOp(Args&&... args)\n : Operator(std::forward(args)...) {}\n ~GetGPUMemoryUsageOp() override {}\n\n bool RunOnDevice() override {\n TORCH_CHECK_EQ(InputSize(), 0);\n TORCH_CHECK_EQ(OutputSize(), 1);\n std::vector total_by_gpu = HIPContext::TotalMemoryByGpu();\n std::vector max_by_gpu = HIPContext::MaxMemoryByGpu();\n TORCH_CHECK_EQ(total_by_gpu.size(), max_by_gpu.size());\n\n\n auto* stats = Output(0, {2, static_cast(total_by_gpu.size())}, at::dtype());\n context_.CopyFromCPU(\n total_by_gpu.size(),\n total_by_gpu.data(),\n stats->template mutable_data());\n context_.CopyFromCPU(\n max_by_gpu.size(),\n max_by_gpu.data(),\n stats->template mutable_data() + total_by_gpu.size());\n return true;\n }\n};\n\nOPERATOR_SCHEMA(GetGPUMemoryUsage)\n .NumInputs(0)\n .NumOutputs(1)\n .SetDoc(R\"DOC(Fetches GPU memory stats from HIPContext. Result is stored\n in output blob with shape (2, num_gpus). First row contains the total\n current memory usage, and the second row the maximum usage during\n this execution.\n\n NOTE: --caffe2_gpu_memory_tracking flag must be enabled to use this op.\n )DOC\");\n\nREGISTER_HIP_OPERATOR(GetGPUMemoryUsage, GetGPUMemoryUsageOp);\n}\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/minmax_ops.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void SelectGradientCUDAKernel(\n const int N,\n const T* dY,\n const T* X,\n const T* Y,\n T* dX) {\n const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;\n if (i < N) {\n#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)\n dX[i] = __ldg(X + i) == __ldg(Y + i) ? __ldg(dY + i) : T(0);\n#else\n dX[i] = X[i] == Y[i] ? 
dY[i] : T(0);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\nbool SelectGradientOpBase::RunOnDevice() {\n const auto& Y = Input(0);\n const auto& dY = Input(1);\n const int N = Y.numel();\n const int M = math::DivUp(N, CAFFE_CUDA_NUM_THREADS);\n const float* dY_data = dY.data();\n const float* Y_data = Y.data();\n for (int i = 0; i < OutputSize(); i++) {\n const auto& Xi = Input(i + 2);\n auto* dXi = Output(i, Xi.sizes(), at::dtype());\n const float* Xi_data = Xi.data();\n float* dXi_data = dXi->mutable_data();\n if (N > 0) {\n SelectGradientCUDAKernel\n <<>>(\n N, dY_data, Xi_data, Y_data, dXi_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n }\n }\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Min, MinOp);\nREGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp);\nREGISTER_CUDA_OPERATOR(Max, MaxOp);\nREGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/minmax_ops.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void SelectGradientHIPKernel(\n const int N,\n const T* dY,\n const T* X,\n const T* Y,\n T* dX) {\n const int i = blockIdx.x * CAFFE_HIP_NUM_THREADS + threadIdx.x;\n if (i < N) {\n#if __HIP_ARCH__ >= 350 || defined(USE_ROCM)\n dX[i] = __ldg(X + i) == __ldg(Y + i) ? __ldg(dY + i) : T(0);\n#else\n dX[i] = X[i] == Y[i] ? dY[i] : T(0);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\nbool SelectGradientOpBase::RunOnDevice() {\n const auto& Y = Input(0);\n const auto& dY = Input(1);\n const int N = Y.numel();\n const int M = math::DivUp(N, CAFFE_HIP_NUM_THREADS);\n const float* dY_data = dY.data();\n const float* Y_data = Y.data();\n for (int i = 0; i < OutputSize(); i++) {\n const auto& Xi = Input(i + 2);\n auto* dXi = Output(i, Xi.sizes(), at::dtype());\n const float* Xi_data = Xi.data();\n float* dXi_data = dXi->mutable_data();\n if (N > 0) {\n hipLaunchKernelGGL(( SelectGradientHIPKernel)\n , dim3(M), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), \n N, dY_data, Xi_data, Y_data, dXi_data);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n }\n }\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Min, MinOp);\nREGISTER_HIP_OPERATOR(MinGradient, MinGradientOp);\nREGISTER_HIP_OPERATOR(Max, MaxOp);\nREGISTER_HIP_OPERATOR(MaxGradient, MaxGradientOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/mod_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void ModOpSimpleKernel(const int N, const int64_t divisor_,\n const T* data_ptr, T* output_ptr) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n output_ptr[i] = data_ptr[i] % divisor_;\n }\n}\n\n\ntemplate \n__global__ void ModOpKernel(const int N, const int64_t divisor_,\n const T* data_ptr, T* output_ptr) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n output_ptr[i] = data_ptr[i] % divisor_;\n if (output_ptr[i] && ((output_ptr[i] > 0) != (divisor_ > 0))) {\n output_ptr[i] += divisor_;\n }\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ModOp::DoRunWithType() {\n auto& data = Input(DATA);\n auto N = data.numel();\n const auto* data_ptr = data.template data();\n\n auto* output = Output(0, data.sizes(), at::dtype());\n auto* output_ptr = output->template mutable_data();\n\n if (sign_follow_divisor_) {\n ModOpKernel<<<\n CAFFE_GET_BLOCKS(N),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n N, divisor_, data_ptr, output_ptr);\n 
C10_CUDA_KERNEL_LAUNCH_CHECK();\n } else {\n ModOpSimpleKernel<<<\n CAFFE_GET_BLOCKS(N),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n N, divisor_, data_ptr, output_ptr);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n }\n\n return true;\n\n}\n\nREGISTER_CUDA_OPERATOR(Mod, ModOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/mod_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void ModOpSimpleKernel(const int N, const int64_t divisor_,\n const T* data_ptr, T* output_ptr) {\n HIP_1D_KERNEL_LOOP(i, N) {\n output_ptr[i] = data_ptr[i] % divisor_;\n }\n}\n\n\ntemplate \n__global__ void ModOpKernel(const int N, const int64_t divisor_,\n const T* data_ptr, T* output_ptr) {\n HIP_1D_KERNEL_LOOP(i, N) {\n output_ptr[i] = data_ptr[i] % divisor_;\n if (output_ptr[i] && ((output_ptr[i] > 0) != (divisor_ > 0))) {\n output_ptr[i] += divisor_;\n }\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ModOp::DoRunWithType() {\n auto& data = Input(DATA);\n auto N = data.numel();\n const auto* data_ptr = data.template data();\n\n auto* output = Output(0, data.sizes(), at::dtype());\n auto* output_ptr = output->template mutable_data();\n\n if (sign_follow_divisor_) {\n hipLaunchKernelGGL(( ModOpKernel), \n dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n N, divisor_, data_ptr, output_ptr);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n } else {\n hipLaunchKernelGGL(( ModOpSimpleKernel), \n dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n N, divisor_, data_ptr, output_ptr);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n }\n\n return true;\n\n}\n\nREGISTER_HIP_OPERATOR(Mod, ModOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/multi_class_accuracy_op.h\"\n#include \"caffe2/utils/GpuAtomics.cuh\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\n__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,\n const int* labeldata, float* accuracies, int* amounts) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n float maxval = Xdata[i * D];\n int maxid = 0;\n for (int j = 1; j < D; ++j) {\n if (Xdata[i * D + j] > maxval) {\n maxval = Xdata[i * D + j];\n maxid = j;\n }\n }\n int labelid = labeldata[i];\n if (maxid == labelid) {\n gpu_atomic_add(accuracies + labelid, static_cast(1));\n }\n gpu_atomic_add(amounts + labelid, static_cast(1));\n }\n}\n__global__ void MultiClassAccuracyDivideKernel(\n const int D, float* accuracies, const int* amounts) {\n CUDA_1D_KERNEL_LOOP(i, D) {\n if (amounts[i]) {\n accuracies[i] /= amounts[i];\n }\n }\n}\n} // namespace\n\ntemplate <>\nbool MultiClassAccuracyOp::RunOnDevice() {\n auto& X = Input(PREDICTION);\n auto& label = Input(LABEL);\n\n\n TORCH_DCHECK_EQ(X.dim(), 2);\n // amount, number of instances\n int N = X.dim32(0);\n // dimension, number of classes\n int D = X.dim32(1);\n TORCH_DCHECK_EQ(label.dim(), 1);\n TORCH_DCHECK_EQ(label.dim32(0), N);\n auto* Y0 = Output(0, {D}, at::dtype());\n auto* Y1 = Output(1, {D}, at::dtype());\n\n const float* Xdata = X.data();\n const int* labeldata = label.data();\n float* accuracies = Y0->template mutable_data();\n int* amounts = Y1->template mutable_data();\n math::Set(D, 0.0, accuracies, &context_);\n math::Set(D, 0, amounts, &context_);\n\n MultiClassAccuracyKernel<<>>(\n N, D, Xdata, 
labeldata, accuracies, amounts);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n MultiClassAccuracyDivideKernel<<>>(\n D, accuracies, amounts);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n MultiClassAccuracy, MultiClassAccuracyOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/multi_class_accuracy_op.h\"\n#include \"caffe2/utils/hip/GpuAtomics.cuh\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\n__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,\n const int* labeldata, float* accuracies, int* amounts) {\n HIP_1D_KERNEL_LOOP(i, N) {\n float maxval = Xdata[i * D];\n int maxid = 0;\n for (int j = 1; j < D; ++j) {\n if (Xdata[i * D + j] > maxval) {\n maxval = Xdata[i * D + j];\n maxid = j;\n }\n }\n int labelid = labeldata[i];\n if (maxid == labelid) {\n gpu_atomic_add(accuracies + labelid, static_cast(1));\n }\n gpu_atomic_add(amounts + labelid, static_cast(1));\n }\n}\n__global__ void MultiClassAccuracyDivideKernel(\n const int D, float* accuracies, const int* amounts) {\n HIP_1D_KERNEL_LOOP(i, D) {\n if (amounts[i]) {\n accuracies[i] /= amounts[i];\n }\n }\n}\n} // namespace\n\ntemplate <>\nbool MultiClassAccuracyOp::RunOnDevice() {\n auto& X = Input(PREDICTION);\n auto& label = Input(LABEL);\n\n\n TORCH_DCHECK_EQ(X.dim(), 2);\n // amount, number of instances\n int N = X.dim32(0);\n // dimension, number of classes\n int D = X.dim32(1);\n TORCH_DCHECK_EQ(label.dim(), 1);\n TORCH_DCHECK_EQ(label.dim32(0), N);\n auto* Y0 = Output(0, {D}, at::dtype());\n auto* Y1 = Output(1, {D}, at::dtype());\n\n const float* Xdata = X.data();\n const int* labeldata = label.data();\n float* accuracies = Y0->template mutable_data();\n int* amounts = Y1->template mutable_data();\n math::Set(D, 0.0, accuracies, &context_);\n math::Set(D, 0, amounts, &context_);\n\n hipLaunchKernelGGL(( MultiClassAccuracyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS),\n 0, context_.hip_stream(), \n N, D, Xdata, labeldata, accuracies, amounts);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n hipLaunchKernelGGL(( MultiClassAccuracyDivideKernel), dim3(CAFFE_GET_BLOCKS(D)), dim3(CAFFE_HIP_NUM_THREADS),\n 0, context_.hip_stream(), \n D, accuracies, amounts);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n MultiClassAccuracy, MultiClassAccuracyOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/negate_gradient_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(NegateGradient, NegateGradientOp)\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/negate_gradient_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(NegateGradient, NegateGradientOp)\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/negative_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Negative,\n UnaryElementwiseOp<\n NumericTypes,\n CUDAContext,\n NegativeFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/negative_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Negative,\n UnaryElementwiseOp<\n NumericTypes,\n HIPContext,\n NegativeFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \n\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/one_hot_ops.h\"\n#include \"caffe2/utils/cub_namespace.cuh\"\n\nnamespace caffe2 {\n\n__global__ void OneHotOpKernel(\n const int64_t batch_size,\n const int64_t index_size,\n const int64_t* indices,\n float* output) {\n CUDA_1D_KERNEL_LOOP(i, batch_size) {\n output[i * index_size + indices[i]] = 1.;\n }\n}\n\ntemplate <>\nvoid OneHotOp::DoOneHotOp(\n int64_t batch_size,\n int64_t index_size,\n const Tensor& indices,\n Tensor* output) {\n float* output_ptr = output->template mutable_data();\n math::Set(output->numel(), 0., output_ptr, &context_);\n OneHotOpKernel<<<\n CAFFE_GET_BLOCKS(batch_size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n batch_size, index_size, indices.data(), output_ptr);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_CUDA_OPERATOR(OneHot, OneHotOp);\n} // namespace\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/one_hot_ops.h\"\n#include \"caffe2/utils/cub_namespace.cuh\"\n\nnamespace caffe2 {\n\n__global__ void OneHotOpKernel(\n const int64_t batch_size,\n const int64_t index_size,\n const int64_t* indices,\n float* output) {\n HIP_1D_KERNEL_LOOP(i, batch_size) {\n output[i * index_size + indices[i]] = 1.;\n }\n}\n\ntemplate <>\nvoid OneHotOp::DoOneHotOp(\n int64_t batch_size,\n int64_t index_size,\n const Tensor& indices,\n Tensor* output) {\n float* output_ptr = output->template mutable_data();\n math::Set(output->numel(), 0., output_ptr, &context_);\n hipLaunchKernelGGL(( OneHotOpKernel), \n dim3(CAFFE_GET_BLOCKS(batch_size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n batch_size, index_size, indices.data(), output_ptr);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_HIP_OPERATOR(OneHot, OneHotOp);\n} // namespace\n###" }, { "cuda": "\n#ifndef CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n#define CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/proto/caffe2_pb.h\"\nnamespace caffe2 {\n\ntemplate \nclass GPUFallbackOpEx final : public Operator {\n public:\n USE_OPERATOR_FUNCTIONS(CUDAContext);\n explicit GPUFallbackOpEx(const OperatorDef& def, Workspace* ws)\n : Operator(def, ws) {\n CAFFE_ENFORCE_EQ(def.device_option().device_type(), PROTO_CUDA);\n OperatorDef base_def_(def);\n \n base_def_.clear_device_option();\n base_def_.mutable_device_option()->set_device_type(PROTO_CPU);\n \n for (const string& name : def.input()) {\n local_input_blobs_.push_back(local_ws_.CreateBlob(name));\n TORCH_CHECK_NOTNULL(local_input_blobs_.back());\n }\n base_op_ = CreateOperator(base_def_, &local_ws_);\n for (const string& name : def.output()) {\n local_output_blobs_.push_back(local_ws_.GetBlob(name));\n TORCH_CHECK_NOTNULL(local_output_blobs_.back());\n }\n }\n bool RunOnDevice() override {\n for (const auto i : c10::irange(InputSize())) {\n if (this->InputIsTensorType(i, CUDA)) {\n \n BlobGetMutableTensor(local_input_blobs_[i], 
CPU)->CopyFrom(Input(i));\n } else {\n VLOG(1) << \"Input \" << i << \" is not TensorCUDA. Skipping copy.\";\n \n \n \n local_input_blobs_[i]->ShareExternal(\n const_cast(OperatorBase::Inputs()[i]->GetRaw()), OperatorBase::Inputs()[i]->meta());\n }\n }\n if (!base_op_->Run()) {\n LOG(ERROR) << \"Base op run failed in GPUFallbackOp. Def: \"\n << ProtoDebugString(this->debug_def());\n return false;\n }\n for (const auto i : c10::irange(OutputSize())) {\n if (SkipOutputCopy::Contains(i)) {\n VLOG(1) << \"Copy output: index \" << i << \" skipped.\";\n continue;\n }\n CAFFE_ENFORCE(\n BlobIsTensorType(*local_output_blobs_[i], CPU), \"GPU fallback op currently does not support non-TensorCPU \"\n \"output type who needs copying.\");\n Output(i)->CopyFrom(local_output_blobs_[i]->template Get());\n }\n return true;\n }\n protected:\n Workspace local_ws_;\n vector local_input_blobs_;\n vector local_output_blobs_;\n unique_ptr base_op_;\n};\nusing GPUFallbackOp = GPUFallbackOpEx>;\n} \n#endif \n\n###", "hip": " \n#ifndef CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n#define CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/proto/caffe2_pb.h\"\nnamespace caffe2 {\n\ntemplate \nclass GPUFallbackOpEx final : public Operator {\n public:\n USE_OPERATOR_FUNCTIONS(HIPContext);\n explicit GPUFallbackOpEx(const OperatorDef& def, Workspace* ws)\n : Operator(def, ws) {\n CAFFE_ENFORCE_EQ(def.device_option().device_type(), PROTO_HIP);\n OperatorDef base_def_(def);\n \n base_def_.clear_device_option();\n base_def_.mutable_device_option()->set_device_type(PROTO_CPU);\n \n for (const string& name : def.input()) {\n local_input_blobs_.push_back(local_ws_.CreateBlob(name));\n TORCH_CHECK_NOTNULL(local_input_blobs_.back());\n }\n base_op_ = CreateOperator(base_def_, &local_ws_);\n for (const string& name : def.output()) {\n local_output_blobs_.push_back(local_ws_.GetBlob(name));\n TORCH_CHECK_NOTNULL(local_output_blobs_.back());\n }\n }\n bool RunOnDevice() override {\n for (const auto i : c10::irange(InputSize())) {\n if (this->InputIsTensorType(i, HIP)) {\n \n BlobGetMutableTensor(local_input_blobs_[i], CPU)->CopyFrom(Input(i));\n } else {\n VLOG(1) << \"Input \" << i << \" is not TensorHIP. Skipping copy.\";\n \n \n \n local_input_blobs_[i]->ShareExternal(\n const_cast(OperatorBase::Inputs()[i]->GetRaw()), OperatorBase::Inputs()[i]->meta());\n }\n }\n if (!base_op_->Run()) {\n LOG(ERROR) << \"Base op run failed in GPUFallbackOp. Def: \"\n << ProtoDebugString(this->debug_def());\n return false;\n }\n for (const auto i : c10::irange(OutputSize())) {\n if (SkipOutputCopy::Contains(i)) {\n VLOG(1) << \"Copy output: index \" << i << \" skipped.\";\n continue;\n }\n CAFFE_ENFORCE(\n BlobIsTensorType(*local_output_blobs_[i], CPU), \"GPU fallback op currently does not support non-TensorCPU \"\n \"output type who needs copying.\");\n Output(i)->CopyFrom(local_output_blobs_[i]->template Get());\n }\n return true;\n }\n protected:\n Workspace local_ws_;\n vector local_input_blobs_;\n vector local_output_blobs_;\n unique_ptr base_op_;\n};\nusing GPUFallbackOp = GPUFallbackOpEx>;\n} \n#endif ###" }, { "cuda": "\n#include \n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/operator_fallback_gpu.h\"\n#include \nnamespace caffe2 {\nclass IncrementByOneOp final : public Operator {\n public:\n template \n explicit IncrementByOneOp(Args&&... 
args)\n : Operator(std::forward(args)...) {}\n bool RunOnDevice() override {\n const auto& in = Input(0);\n auto* out = Output(0, in.sizes(), at::dtype());\n const float* in_data = in.template data();\n float* out_data = out->template mutable_data();\n for (int i = 0; i < in.numel(); ++i) {\n out_data[i] = in_data[i] + 1.f;\n }\n return true;\n }\n};\nOPERATOR_SCHEMA(IncrementByOne)\n .NumInputs(1).NumOutputs(1).AllowInplace({{0, 0}});\nREGISTER_CPU_OPERATOR(IncrementByOne, IncrementByOneOp);\nREGISTER_CUDA_OPERATOR(IncrementByOne, GPUFallbackOp);\nTEST(OperatorFallbackTest, IncrementByOneOp) {\n OperatorDef op_def = CreateOperatorDef(\n \"IncrementByOne\", \"\", vector{\"X\"}, vector{\"X\"});\n Workspace ws;\n Tensor source_tensor(vector{2, 3}, CPU);\n for (int i = 0; i < 6; ++i) {\n source_tensor.mutable_data()[i] = i;\n }\n BlobGetMutableTensor(ws.CreateBlob(\"X\"), CPU)->CopyFrom(source_tensor);\n unique_ptr op(CreateOperator(op_def, &ws));\n EXPECT_TRUE(op.get() != nullptr);\n EXPECT_TRUE(op->Run());\n const TensorCPU& output = ws.GetBlob(\"X\")->Get();\n EXPECT_EQ(output.dim(), 2);\n EXPECT_EQ(output.size(0), 2);\n EXPECT_EQ(output.size(1), 3);\n for (int i = 0; i < 6; ++i) {\n EXPECT_EQ(output.data()[i], i + 1);\n }\n}\nTEST(OperatorFallbackTest, GPUIncrementByOneOp) {\n if (!HasCudaGPU()) return;\n OperatorDef op_def = CreateOperatorDef(\n \"IncrementByOne\", \"\", vector{\"X\"}, vector{\"X\"});\n op_def.mutable_device_option()->set_device_type(PROTO_CUDA);\n Workspace ws;\n Tensor source_tensor(vector{2, 3}, CPU);\n for (int i = 0; i < 6; ++i) {\n source_tensor.mutable_data()[i] = i;\n }\n BlobGetMutableTensor(ws.CreateBlob(\"X\"), CUDA)->CopyFrom(source_tensor);\n unique_ptr op(CreateOperator(op_def, &ws));\n EXPECT_TRUE(op.get() != nullptr);\n EXPECT_TRUE(op->Run());\n const TensorCUDA& output = ws.GetBlob(\"X\")->Get();\n Tensor output_cpu(output, CPU);\n EXPECT_EQ(output.dim(), 2);\n EXPECT_EQ(output.size(0), 2);\n EXPECT_EQ(output.size(1), 3);\n for (int i = 0; i < 6; ++i) {\n EXPECT_EQ(output_cpu.data()[i], i + 1);\n }\n}\n} \n\n###", "hip": " \n#include \n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/hip/operator_fallback_gpu.h\"\n#include \nnamespace caffe2 {\nclass IncrementByOneOp final : public Operator {\n public:\n template \n explicit IncrementByOneOp(Args&&... args)\n : Operator(std::forward(args)...) 
{}\n bool RunOnDevice() override {\n const auto& in = Input(0);\n auto* out = Output(0, in.sizes(), at::dtype());\n const float* in_data = in.template data();\n float* out_data = out->template mutable_data();\n for (int i = 0; i < in.numel(); ++i) {\n out_data[i] = in_data[i] + 1.f;\n }\n return true;\n }\n};\nOPERATOR_SCHEMA(IncrementByOne)\n .NumInputs(1).NumOutputs(1).AllowInplace({{0, 0}});\nREGISTER_CPU_OPERATOR(IncrementByOne, IncrementByOneOp);\nREGISTER_HIP_OPERATOR(IncrementByOne, GPUFallbackOp);\nTEST(OperatorFallbackTest, IncrementByOneOp) {\n OperatorDef op_def = CreateOperatorDef(\n \"IncrementByOne\", \"\", vector{\"X\"}, vector{\"X\"});\n Workspace ws;\n Tensor source_tensor(vector{2, 3}, CPU);\n for (int i = 0; i < 6; ++i) {\n source_tensor.mutable_data()[i] = i;\n }\n BlobGetMutableTensor(ws.CreateBlob(\"X\"), CPU)->CopyFrom(source_tensor);\n unique_ptr op(CreateOperator(op_def, &ws));\n EXPECT_TRUE(op.get() != nullptr);\n EXPECT_TRUE(op->Run());\n const TensorCPU& output = ws.GetBlob(\"X\")->Get();\n EXPECT_EQ(output.dim(), 2);\n EXPECT_EQ(output.size(0), 2);\n EXPECT_EQ(output.size(1), 3);\n for (int i = 0; i < 6; ++i) {\n EXPECT_EQ(output.data()[i], i + 1);\n }\n}\nTEST(OperatorFallbackTest, GPUIncrementByOneOp) {\n if (!HasHipGPU()) return;\n OperatorDef op_def = CreateOperatorDef(\n \"IncrementByOne\", \"\", vector{\"X\"}, vector{\"X\"});\n op_def.mutable_device_option()->set_device_type(PROTO_HIP);\n Workspace ws;\n Tensor source_tensor(vector{2, 3}, CPU);\n for (int i = 0; i < 6; ++i) {\n source_tensor.mutable_data()[i] = i;\n }\n BlobGetMutableTensor(ws.CreateBlob(\"X\"), HIP)->CopyFrom(source_tensor);\n unique_ptr op(CreateOperator(op_def, &ws));\n EXPECT_TRUE(op.get() != nullptr);\n EXPECT_TRUE(op->Run());\n const TensorHIP& output = ws.GetBlob(\"X\")->Get();\n Tensor output_cpu(output, CPU);\n EXPECT_EQ(output.dim(), 2);\n EXPECT_EQ(output.size(0), 2);\n EXPECT_EQ(output.size(1), 3);\n for (int i = 0; i < 6; ++i) {\n EXPECT_EQ(output_cpu.data()[i], i + 1);\n }\n}\n} ###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n// -----------------------------------\n// prelu\n// -----------------------------------\nvoid prelu_kernel(TensorIterator &iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), \"prelu_cuda\", [&] {\n gpu_kernel(iter,\n [] GPU_LAMBDA (scalar_t input, scalar_t weight) -> scalar_t {\n return (input > 0) ? input : weight * input;\n });\n });\n}\n\nvoid prelu_backward_kernel(TensorIterator &iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), \"prelu_backward_cuda\", [&] {\n gpu_kernel_multiple_outputs(iter,\n [] GPU_LAMBDA (scalar_t input, scalar_t weight, scalar_t grad) -> thrust::tuple {\n auto mask = input > 0;\n auto grad_input = mask ? grad : weight * grad;\n auto grad_weight = mask ? scalar_t{0} : input * grad;\n return {grad_input, grad_weight};\n });\n });\n}\n\nREGISTER_DISPATCH(prelu_stub, &prelu_kernel);\nREGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n// -----------------------------------\n// prelu\n// -----------------------------------\nvoid prelu_kernel(TensorIterator &iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), \"prelu_hip\", [&] {\n gpu_kernel(iter,\n [] GPU_LAMBDA (scalar_t input, scalar_t weight) -> scalar_t {\n return (input > 0) ? input : weight * input;\n });\n });\n}\n\nvoid prelu_backward_kernel(TensorIterator &iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), \"prelu_backward_hip\", [&] {\n gpu_kernel_multiple_outputs(iter,\n [] GPU_LAMBDA (scalar_t input, scalar_t weight, scalar_t grad) -> thrust::tuple {\n auto mask = input > 0;\n auto grad_input = mask ? grad : weight * grad;\n auto grad_weight = mask ? scalar_t{0} : input * grad;\n return {grad_input, grad_weight};\n });\n });\n}\n\nREGISTER_DISPATCH(prelu_stub, &prelu_kernel);\nREGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/operators/order_switch_ops.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(NHWC2NCHW, NHWC2NCHWOp);\nREGISTER_CUDA_OPERATOR(NCHW2NHWC, NCHW2NHWCOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/order_switch_ops.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(NHWC2NCHW, NHWC2NCHWOp);\nREGISTER_HIP_OPERATOR(NCHW2NHWC, NCHW2NHWCOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/perplexity_op.h\"\n#include \"caffe2/utils/math.h\"\n#include \n#include \n#include \n\nnamespace caffe2 {\n\nstruct perplexity_function\n{\n perplexity_function(float p) : pow(p) {}\n __host__ __device__ float operator()(float x) const\n {\n return powf(1.0f/x, pow);\n }\n float pow;\n};\n\ntemplate <>\nbool PerplexityOp::RunOnDevice() {\n auto& X = Input(0);\n\n TORCH_DCHECK_EQ(X.dim(), 1);\n int N = X.dim32(0);\n\n auto* Y = Output(0, vector(), at::dtype());\n float* Ydata = Y->template mutable_data();\n const float* Xdata = X.data();\n\n float perplexity = thrust::transform_reduce(\n #if THRUST_VERSION >= 100800\n thrust::cuda::par.on(context_.cuda_stream()),\n #endif // THRUST_VERSION >= 100800\n Xdata, Xdata + N,\n perplexity_function(1.0f/N),\n 1.0f,\n thrust::multiplies());\n\n math::Set(1, perplexity, Ydata, &context_);\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Perplexity, PerplexityOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/perplexity_op.h\"\n#include \"caffe2/utils/math.h\"\n#include \n#include \n#include \n\nnamespace caffe2 {\n\nstruct perplexity_function\n{\n perplexity_function(float p) : pow(p) {}\n __host__ __device__ float operator()(float x) const\n {\n return powf(1.0f/x, pow);\n }\n float pow;\n};\n\ntemplate <>\nbool PerplexityOp::RunOnDevice() {\n auto& X = Input(0);\n\n TORCH_DCHECK_EQ(X.dim(), 1);\n int N = X.dim32(0);\n\n auto* Y = Output(0, vector(), at::dtype());\n float* Ydata = Y->template mutable_data();\n const float* Xdata = X.data();\n\n float perplexity = thrust::transform_reduce(\n #if THRUST_VERSION >= 100800\n thrust::hip::par.on(context_.hip_stream()),\n #endif // THRUST_VERSION >= 100800\n Xdata, Xdata + N,\n perplexity_function(1.0f/N),\n 1.0f,\n thrust::multiplies());\n\n math::Set(1, perplexity, Ydata, &context_);\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Perplexity, PerplexityOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/prepend_dim_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(PrependDim, PrependDimOp);\nREGISTER_CUDA_OPERATOR(MergeDim, MergeDimOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/prepend_dim_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(PrependDim, PrependDimOp);\nREGISTER_HIP_OPERATOR(MergeDim, MergeDimOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/reciprocal_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nReciprocalGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * (-__ldg(Y + i) * __ldg(Y + i));\n#else\n dX[i] = dY[i] * (-Y[i] * Y[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ReciprocalGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n ReciprocalGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Reciprocal,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n ReciprocalFunctor>);\nREGISTER_CUDA_OPERATOR(\n ReciprocalGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n ReciprocalGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/reciprocal_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nReciprocalGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * (-__ldg(Y + i) * __ldg(Y + i));\n#else\n dX[i] = dY[i] * (-Y[i] * Y[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ReciprocalGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( ReciprocalGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Reciprocal,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n ReciprocalFunctor>);\nREGISTER_HIP_OPERATOR(\n ReciprocalGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n ReciprocalGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/relu_n_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nReluNCUDAKernel(const int N, const T threshold, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n Y[i] = __ldg(X + i) > 0\n ? (__ldg(X + i) < threshold ? __ldg(X + i) : threshold)\n : T(0);\n#else\n Y[i] = X[i] > 0 ? (X[i] < threshold ? X[i] : threshold) : T(0);\n#endif\n }\n}\n\ntemplate \n__global__ void ReluNGradientCUDAKernel(\n const int N,\n const T threshold,\n const T* dY,\n const T* Y,\n T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = (__ldg(Y + i) > 0 && __ldg(Y + i) < threshold) ? dY[i] : T(0);\n#else\n dX[i] = (Y[i] > 0 && Y[i] < threshold) ? dY[i] : T(0);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ReluNFunctor::\noperator()(const int N, const T* X, T* Y, CUDAContext* context) const {\n ReluNCUDAKernel\n <<cuda_stream()>>>(N, n, X, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool ReluNGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n ReluNGradientCUDAKernel\n <<cuda_stream()>>>(size, n, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n ReluN,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n CUDAContext,\n ReluNFunctor>);\nREGISTER_CUDA_OPERATOR(\n ReluNGradient,\n BinaryElementwiseWithArgsOp<\n TensorTypes,\n CUDAContext,\n ReluNGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/relu_n_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nReluNHIPKernel(const int N, const T threshold, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n Y[i] = __ldg(X + i) > 0\n ? (__ldg(X + i) < threshold ? 
__ldg(X + i) : threshold)\n : T(0);\n#else\n Y[i] = X[i] > 0 ? (X[i] < threshold ? X[i] : threshold) : T(0);\n#endif\n }\n}\n\ntemplate \n__global__ void ReluNGradientHIPKernel(\n const int N,\n const T threshold,\n const T* dY,\n const T* Y,\n T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = (__ldg(Y + i) > 0 && __ldg(Y + i) < threshold) ? dY[i] : T(0);\n#else\n dX[i] = (Y[i] > 0 && Y[i] < threshold) ? dY[i] : T(0);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool ReluNFunctor::\noperator()(const int N, const T* X, T* Y, HIPContext* context) const {\n hipLaunchKernelGGL(( ReluNHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), N, n, X, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool ReluNGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( ReluNGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, n, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n ReluN,\n UnaryElementwiseWithArgsOp<\n TensorTypes,\n HIPContext,\n ReluNFunctor>);\nREGISTER_HIP_OPERATOR(\n ReluNGradient,\n BinaryElementwiseWithArgsOp<\n TensorTypes,\n HIPContext,\n ReluNGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/replace_nan_op.h\"\n\nnamespace caffe2 {\n\nnamespace {\ntemplate \n__global__ void\nreplace_nan_kernel(const T value, const int64_t size, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, size) {\n if (isnan(X[i])) {\n Y[i] = value;\n } else {\n Y[i] = X[i];\n }\n }\n}\n} // namespace\n\ntemplate <>\ntemplate \nvoid ReplaceNaNOp::ReplaceNaN(\n const T& value,\n const int64_t size,\n const T* X,\n T* Y) {\n replace_nan_kernel<<<\n CAFFE_GET_BLOCKS(size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(value, size, X, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\nREGISTER_CUDA_OPERATOR(ReplaceNaN, ReplaceNaNOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/replace_nan_op.h\"\n\nnamespace caffe2 {\n\nnamespace {\ntemplate \n__global__ void\nreplace_nan_kernel(const T value, const int64_t size, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, size) {\n if (isnan(X[i])) {\n Y[i] = value;\n } else {\n Y[i] = X[i];\n }\n }\n}\n} // namespace\n\ntemplate <>\ntemplate \nvoid ReplaceNaNOp::ReplaceNaN(\n const T& value,\n const int64_t size,\n const T* X,\n T* Y) {\n hipLaunchKernelGGL(( replace_nan_kernel), \n dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), value, size, X, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\nREGISTER_HIP_OPERATOR(ReplaceNaN, ReplaceNaNOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/reshape_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(Reshape, ReshapeOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/reshape_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(Reshape, ReshapeOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \n\n#include \n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/operators/reshape_op.h\"\n#include \"caffe2/utils/math.h\"\n\nC10_DECLARE_string(caffe_test_root);\n\nnamespace caffe2 {\n\nstatic void AddConstInput(\n const vector& shape,\n const float value,\n const string& name,\n Workspace* ws) {\n DeviceOption option;\n option.set_device_type(PROTO_CUDA);\n CUDAContext context(option);\n Blob* blob = ws->CreateBlob(name);\n auto* tensor = BlobGetMutableTensor(blob, CUDA);\n tensor->Resize(shape);\n math::Set(\n tensor->numel(), value, tensor->template mutable_data(), &context);\n return;\n}\n\nTEST(ReshapeOpGPUTest, testReshapeWithScalar) {\n if (!HasCudaGPU())\n return;\n Workspace ws;\n OperatorDef def;\n def.set_name(\"test_reshape\");\n def.set_type(\"Reshape\");\n def.add_input(\"X\");\n def.add_output(\"XNew\");\n def.add_output(\"OldShape\");\n def.add_arg()->CopyFrom(MakeArgument(\"shape\", vector{1}));\n def.mutable_device_option()->set_device_type(PROTO_CUDA);\n AddConstInput(vector(), 3.14, \"X\", &ws);\n // execute the op\n unique_ptr op(CreateOperator(def, &ws));\n EXPECT_TRUE(op->Run());\n Blob* XNew = ws.GetBlob(\"XNew\");\n const Tensor& XNewTensor = XNew->Get();\n EXPECT_EQ(1, XNewTensor.dim());\n EXPECT_EQ(1, XNewTensor.numel());\n}\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/operators/reshape_op.h\"\n#include \"caffe2/utils/math.h\"\n\nC10_DECLARE_string(caffe_test_root);\n\nnamespace caffe2 {\n\nstatic void AddConstInput(\n const vector& shape,\n const float value,\n const string& name,\n Workspace* ws) {\n DeviceOption option;\n option.set_device_type(PROTO_HIP);\n HIPContext context(option);\n Blob* blob = ws->CreateBlob(name);\n auto* tensor = BlobGetMutableTensor(blob, HIP);\n tensor->Resize(shape);\n math::Set(\n tensor->numel(), value, tensor->template mutable_data(), &context);\n return;\n}\n\nTEST(ReshapeOpGPUTest, testReshapeWithScalar) {\n if (!HasHipGPU())\n return;\n Workspace ws;\n OperatorDef def;\n def.set_name(\"test_reshape\");\n def.set_type(\"Reshape\");\n def.add_input(\"X\");\n def.add_output(\"XNew\");\n def.add_output(\"OldShape\");\n def.add_arg()->CopyFrom(MakeArgument(\"shape\", vector{1}));\n def.mutable_device_option()->set_device_type(PROTO_HIP);\n AddConstInput(vector(), 3.14, \"X\", &ws);\n // execute the op\n unique_ptr op(CreateOperator(def, &ws));\n EXPECT_TRUE(op->Run());\n Blob* XNew = ws.GetBlob(\"XNew\");\n const Tensor& XNewTensor = XNew->Get();\n EXPECT_EQ(1, XNewTensor.dim());\n EXPECT_EQ(1, XNewTensor.numel());\n}\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/reverse_packed_segs_op.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__\nvoid ReversePackedSegments_kernel(\n size_t max_length,\n size_t batch_size,\n size_t block_size,\n const LengthType* lengths_ptr,\n const T* data_ptr,\n T* rev_data_ptr) {\n\n const int block_id = blockIdx.x;\n\n // index into [0, 
batch_size)\n const int batch = block_id / max_length;\n // index into [0, segment)\n const int segment = block_id % max_length;\n\n if (batch >= batch_size || segment >= max_length) return;\n\n const int seg_length = lengths_ptr[batch];\n\n // unique data pointer for this CTA\n const T* local_data_ptr = data_ptr + (segment * batch_size + batch) * block_size;\n\n // unique pointer for result\n T* local_rev_data_ptr;\n if (segment < seg_length) {\n local_rev_data_ptr = rev_data_ptr + ((seg_length - 1 - segment) * batch_size + batch) * block_size;\n } else {\n local_rev_data_ptr = rev_data_ptr + (segment * batch_size + batch) * block_size;\n }\n\n // copy using 1 element / thread for now\n for (int idx = threadIdx.x; idx < block_size; idx+=blockDim.x) {\n local_rev_data_ptr[idx] = local_data_ptr[idx];\n }\n}\n\n} // namespace\n\n// specialization of DoRunWithLengthType\ntemplate <>\ntemplate \nvoid ReversePackedSegsOp::DoRunWithLengthType() {\n const auto& data = Input(DATA);\n const auto& lengths = Input(LENGTHS);\n\n CAFFE_ENFORCE(\n data.dim() == 3,\n \"DATA should be 3-D tensor \");\n CAFFE_ENFORCE(lengths.dim() == 1, \"LENGTH should be 1-D\");\n\n auto* output = Output(0, data.sizes(), at::dtype());\n\n const auto max_length = data.size(0);\n const auto batch_size = data.size(1);\n const auto block_size = data.size(2);\n CAFFE_ENFORCE(\n lengths.sizes()[0] == batch_size,\n \"lenths size should be\"\n \" equal to batch size\");\n\n const T* data_ptr = data.template data();\n const LengthType* lengths_ptr = lengths.template data();\n\n // reversed data\n T* rev_data_ptr = output->template mutable_data();\n\n const int grid = max_length * batch_size;\n\n ReversePackedSegments_kernel<<>>(\n max_length,\n batch_size,\n block_size,\n lengths_ptr,\n data_ptr,\n rev_data_ptr);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_CUDA_OPERATOR(ReversePackedSegs, ReversePackedSegsOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/reverse_packed_segs_op.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__\nvoid ReversePackedSegments_kernel(\n size_t max_length,\n size_t batch_size,\n size_t block_size,\n const LengthType* lengths_ptr,\n const T* data_ptr,\n T* rev_data_ptr) {\n\n const int block_id = blockIdx.x;\n\n // index into [0, batch_size)\n const int batch = block_id / max_length;\n // index into [0, segment)\n const int segment = block_id % max_length;\n\n if (batch >= batch_size || segment >= max_length) return;\n\n const int seg_length = lengths_ptr[batch];\n\n // unique data pointer for this CTA\n const T* local_data_ptr = data_ptr + (segment * batch_size + batch) * block_size;\n\n // unique pointer for result\n T* local_rev_data_ptr;\n if (segment < seg_length) {\n local_rev_data_ptr = rev_data_ptr + ((seg_length - 1 - segment) * batch_size + batch) * block_size;\n } else {\n local_rev_data_ptr = rev_data_ptr + (segment * batch_size + batch) * block_size;\n }\n\n // copy using 1 element / thread for now\n for (int idx = threadIdx.x; idx < block_size; idx+=blockDim.x) {\n local_rev_data_ptr[idx] = local_data_ptr[idx];\n }\n}\n\n} // namespace\n\n// specialization of DoRunWithLengthType\ntemplate <>\ntemplate \nvoid ReversePackedSegsOp::DoRunWithLengthType() {\n const auto& data = Input(DATA);\n const auto& lengths = Input(LENGTHS);\n\n CAFFE_ENFORCE(\n data.dim() == 3,\n \"DATA should be 3-D tensor \");\n CAFFE_ENFORCE(lengths.dim() == 1, \"LENGTH should be 1-D\");\n\n auto* output = Output(0, data.sizes(), at::dtype());\n\n const auto max_length = data.size(0);\n const auto batch_size = data.size(1);\n const auto block_size = data.size(2);\n CAFFE_ENFORCE(\n lengths.sizes()[0] == batch_size,\n \"lenths size should be\"\n \" equal to batch size\");\n\n const T* data_ptr = data.template data();\n const LengthType* lengths_ptr = lengths.template data();\n\n // reversed data\n T* rev_data_ptr = output->template mutable_data();\n\n const int grid = max_length * batch_size;\n\n hipLaunchKernelGGL(( ReversePackedSegments_kernel), dim3(grid), dim3(512), 0, context_.hip_stream(), \n max_length,\n batch_size,\n block_size,\n lengths_ptr,\n data_ptr,\n rev_data_ptr);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_HIP_OPERATOR(ReversePackedSegs, ReversePackedSegsOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/rsqrt_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nRsqrtGradientCUDAKernel(const int size, const T* dY, const T* Y, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, size) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * math::utils::Cube(__ldg(Y + i)) *\n static_cast(-0.5);\n#else\n dX[i] = dY[i] * math::utils::Cube(Y[i]) * static_cast(-0.5);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool RsqrtGradientFunctor::Forward(\n const std::vector& dY_dims,\n const std::vector& /* Y_dims */,\n const T* dY,\n const T* Y,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies());\n RsqrtGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Rsqrt,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n 
RsqrtFunctor>);\nREGISTER_CUDA_OPERATOR(\n RsqrtGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n RsqrtGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/rsqrt_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nRsqrtGradientHIPKernel(const int size, const T* dY, const T* Y, T* dX) {\n HIP_1D_KERNEL_LOOP(i, size) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * math::utils::Cube(__ldg(Y + i)) *\n static_cast(-0.5);\n#else\n dX[i] = dY[i] * math::utils::Cube(Y[i]) * static_cast(-0.5);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool RsqrtGradientFunctor::Forward(\n const std::vector& dY_dims,\n const std::vector& /* Y_dims */,\n const T* dY,\n const T* Y,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( RsqrtGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Rsqrt,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n RsqrtFunctor>);\nREGISTER_HIP_OPERATOR(\n RsqrtGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n RsqrtGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid silu_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"silu_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t x_acc = static_cast(x);\n return x_acc / (opmath_t(1) + c10::cuda::compat::exp(-x_acc));\n });\n });\n}\n\nvoid silu_backward_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"silu_backward_cuda\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t dy_acc = static_cast(dy);\n const opmath_t x_acc = static_cast(x);\n const opmath_t s_acc =\n opmath_t(1) / (opmath_t(1) + c10::cuda::compat::exp(-x_acc));\n return dy_acc * s_acc * (opmath_t(1) + x_acc * (opmath_t(1) - s_acc));\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(silu_stub, &silu_kernel);\nREGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid silu_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"silu_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t x_acc = static_cast(x);\n return x_acc / (opmath_t(1) + c10::hip::compat::exp(-x_acc));\n });\n });\n}\n\nvoid silu_backward_kernel(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"silu_backward_hip\",\n [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {\n using opmath_t = at::opmath_type;\n const opmath_t dy_acc = static_cast(dy);\n const opmath_t x_acc = static_cast(x);\n const opmath_t s_acc =\n opmath_t(1) / (opmath_t(1) + c10::hip::compat::exp(-x_acc));\n return dy_acc * s_acc * (opmath_t(1) + x_acc * (opmath_t(1) - s_acc));\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(silu_stub, &silu_kernel);\nREGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/scale_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool ScaleOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\nREGISTER_CUDA_OPERATOR(Scale, ScaleOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/scale_op.h\"\n\nnamespace caffe2 {\n\ntemplate <>\nbool ScaleOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\nREGISTER_HIP_OPERATOR(Scale, ScaleOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/selu_op.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void SeluKernel(const int N, const T* X, T* Y, T alpha_, T lambda_) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = lambda_ * (X[i] > 0 ? X[i] : alpha_ * __expf(X[i]) - alpha_);\n }\n}\n\ntemplate \n__global__ void SeluGradientKernel(\n const int N,\n const T* Y,\n const T* dY,\n T* dX,\n T alpha_,\n T lambda_) {\n const T c = lambda_ * alpha_;\n CUDA_1D_KERNEL_LOOP(i, N) {\n // Reuse Y[i] to avoid computing exp(X[i])\n dX[i] = Y[i] > 0 ? 
lambda_ * dY[i] : dY[i] * (Y[i] + c);\n }\n}\n} // namespace\n\ntemplate <>\nbool SeluOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n SeluKernel\n <<>>(\n X.numel(),\n X.data(),\n Y->template mutable_data(),\n alpha_,\n lambda_);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool SeluGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n CAFFE_ENFORCE_GT(Y.numel(), 0);\n CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n SeluGradientKernel\n <<>>(\n Y.numel(),\n Y.data(),\n dY.data(),\n dX->template mutable_data(),\n alpha_,\n lambda_);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Selu, SeluOp);\nREGISTER_CUDA_OPERATOR(SeluGradient, SeluGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/selu_op.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void SeluKernel(const int N, const T* X, T* Y, T alpha_, T lambda_) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = lambda_ * (X[i] > 0 ? X[i] : alpha_ * expf(X[i]) - alpha_);\n }\n}\n\ntemplate \n__global__ void SeluGradientKernel(\n const int N,\n const T* Y,\n const T* dY,\n T* dX,\n T alpha_,\n T lambda_) {\n const T c = lambda_ * alpha_;\n HIP_1D_KERNEL_LOOP(i, N) {\n // Reuse Y[i] to avoid computing exp(X[i])\n dX[i] = Y[i] > 0 ? lambda_ * dY[i] : dY[i] * (Y[i] + c);\n }\n}\n} // namespace\n\ntemplate <>\nbool SeluOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( SeluKernel)\n , dim3(CAFFE_GET_BLOCKS(X.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n X.numel(),\n X.data(),\n Y->template mutable_data(),\n alpha_,\n lambda_);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool SeluGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n CAFFE_ENFORCE_GT(Y.numel(), 0);\n CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n hipLaunchKernelGGL(( SeluGradientKernel)\n , dim3(CAFFE_GET_BLOCKS(Y.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n Y.numel(),\n Y.data(),\n dY.data(),\n dX->template mutable_data(),\n alpha_,\n lambda_);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Selu, SeluOp);\nREGISTER_HIP_OPERATOR(SeluGradient, SeluGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/shape_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(Shape, ShapeOp);\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/shape_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(Shape, ShapeOp);\n}\n###" }, { "cuda": "\n#include \"caffe2/operators/sigmoid_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void SigmoidCUDAKernel(const int N, const T* X, T* Y);\n\ntemplate <>\n__global__ void\nSigmoidCUDAKernel(const int N, const float* X, float* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n Y[i] = 1.0f / (1.0f + expf(-__ldg(X + i)));\n#else\n Y[i] = 1.0f / (1.0f + expf(-X[i]));\n#endif\n }\n}\n\ntemplate \n__global__ void\nSigmoidGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * __ldg(Y + i) * (T(1) - __ldg(Y + i));\n#else\n dX[i] = dY[i] * Y[i] * (T(1) - Y[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SigmoidFunctor::\noperator()(const int N, const T* X, T* Y, CUDAContext* context) const {\n SigmoidCUDAKernel\n <<cuda_stream()>>>(N, X, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool SigmoidGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n SigmoidGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Sigmoid,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SigmoidFunctor>);\nREGISTER_CUDA_OPERATOR(\n SigmoidGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SigmoidGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/sigmoid_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void SigmoidHIPKernel(const int N, const T* X, T* Y);\n\ntemplate <>\n__global__ void\nSigmoidHIPKernel(const int N, const float* X, float* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n Y[i] = 1.0f / (1.0f + expf(-__ldg(X + i)));\n#else\n Y[i] = 1.0f / (1.0f + expf(-X[i]));\n#endif\n }\n}\n\ntemplate \n__global__ void\nSigmoidGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * __ldg(Y + i) * (T(1) - __ldg(Y + i));\n#else\n dX[i] = dY[i] * Y[i] * (T(1) - Y[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SigmoidFunctor::\noperator()(const int N, const T* X, T* Y, HIPContext* context) const {\n hipLaunchKernelGGL(( SigmoidHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), N, X, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool SigmoidGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( SigmoidGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Sigmoid,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SigmoidFunctor>);\nREGISTER_HIP_OPERATOR(\n SigmoidGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SigmoidGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/sinh_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void SinhGradientCUDAKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * coshf(__ldg(X + i));\n#else\n dX[i] = dY[i] * coshf(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SinhGradientFunctor::Forward(\n const std::vector& /* dY_dims */,\n const std::vector& X_dims,\n const T* dY,\n const T* X,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n SinhGradientCUDAKernel<<<\n CAFFE_GET_BLOCKS(size),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context->cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Sinh,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SinhFunctor>);\nREGISTER_CUDA_OPERATOR(\n SinhGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SinhGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/sinh_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\n__global__ void SinhGradientHIPKernel(\n const int N,\n const float* dY,\n const float* X,\n float* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * coshf(__ldg(X + i));\n#else\n dX[i] = dY[i] * coshf(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SinhGradientFunctor::Forward(\n const std::vector& /* dY_dims */,\n const std::vector& X_dims,\n const T* dY,\n const T* X,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( SinhGradientHIPKernel), \n dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Sinh,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SinhFunctor>);\nREGISTER_HIP_OPERATOR(\n SinhGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SinhGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/sin_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nSinGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * cos(__ldg(X + i));\n#else\n dX[i] = dY[i] * cos(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SinGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n SinGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Sin,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SinFunctor>);\nREGISTER_CUDA_OPERATOR(\n SinGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SinGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/sin_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nSinGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * cos(__ldg(X + i));\n#else\n dX[i] = dY[i] * cos(X[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SinGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( SinGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Sin,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SinFunctor>);\nREGISTER_HIP_OPERATOR(\n SinGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SinGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/softplus_op.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void SoftplusKernel(const int N, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = log(exp(X[i]) + 1.0f);\n }\n}\n\ntemplate \n__global__ void\nSoftplusGradientKernel(const int N, const T* Y, const T* dY, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n const float nexpY = exp(-Y[i]);\n dX[i] = dY[i] * (1 - nexpY);\n }\n}\n} // namespace\n\ntemplate <>\nbool SoftplusOp::RunOnDevice() {\n auto& X = Input(0);\n\n TORCH_DCHECK_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n SoftplusKernel\n <<>>(\n X.numel(), X.data(), Y->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool SoftplusGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n TORCH_DCHECK_GT(Y.numel(), 0);\n TORCH_DCHECK_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n SoftplusGradientKernel\n <<>>(\n Y.numel(),\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(Softplus, SoftplusOp);\nREGISTER_CUDA_OPERATOR(\n SoftplusGradient,\n SoftplusGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/softplus_op.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void SoftplusKernel(const int N, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = log(exp(X[i]) + 1.0f);\n }\n}\n\ntemplate \n__global__ void\nSoftplusGradientKernel(const int N, const T* Y, const T* dY, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n const float nexpY = exp(-Y[i]);\n dX[i] = dY[i] * (1 - nexpY);\n }\n}\n} // namespace\n\ntemplate <>\nbool SoftplusOp::RunOnDevice() {\n auto& X = Input(0);\n\n TORCH_DCHECK_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( SoftplusKernel)\n , dim3(CAFFE_GET_BLOCKS(X.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n X.numel(), X.data(), Y->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool SoftplusGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n TORCH_DCHECK_GT(Y.numel(), 0);\n TORCH_DCHECK_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n hipLaunchKernelGGL(( SoftplusGradientKernel)\n , dim3(CAFFE_GET_BLOCKS(Y.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n Y.numel(),\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(Softplus, SoftplusOp);\nREGISTER_HIP_OPERATOR(\n SoftplusGradient,\n SoftplusGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/softsign_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\nusing c10::cuda::compat::abs;\n\ntemplate \ninline __host__ __device__ T SquareCUDA(const T x) {\n return x * x;\n}\n\ntemplate \n__global__ void SoftsignCUDAKernel(const int N, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n Y[i] = __ldg(X + i) / (T(1) + abs(__ldg(X + i)));\n#else\n Y[i] = X[i] / (T(1) + abs(X[i]));\n#endif\n }\n}\n\ntemplate \n__global__ void\nSoftsignGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / SquareCUDA(T(1) + abs(__ldg(X + i)));\n#else\n dX[i] = dY[i] / SquareCUDA(T(1) + abs(X[i]));\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SoftsignFunctor::\noperator()(const int N, const T* X, T* Y, CUDAContext* context) const {\n SoftsignCUDAKernel\n <<cuda_stream()>>>(N, X, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool SoftsignGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n SoftsignGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Softsign,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SoftsignFunctor>);\nREGISTER_CUDA_OPERATOR(\n SoftsignGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SoftsignGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/softsign_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\nusing c10::hip::compat::abs;\n\ntemplate \ninline __host__ __device__ T SquareHIP(const T x) {\n return x * x;\n}\n\ntemplate \n__global__ void SoftsignHIPKernel(const int N, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n Y[i] = __ldg(X + i) / (T(1) + abs(__ldg(X + i)));\n#else\n Y[i] = X[i] / (T(1) + abs(X[i]));\n#endif\n }\n}\n\ntemplate \n__global__ void\nSoftsignGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / SquareHIP(T(1) + abs(__ldg(X + i)));\n#else\n dX[i] = dY[i] / SquareHIP(T(1) + abs(X[i]));\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SoftsignFunctor::\noperator()(const int N, const T* X, T* Y, HIPContext* context) const {\n hipLaunchKernelGGL(( SoftsignHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(N)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), N, X, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool SoftsignGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( SoftsignGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Softsign,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SoftsignFunctor>);\nREGISTER_HIP_OPERATOR(\n SoftsignGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SoftsignGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/operator_fallback_gpu.h\"\n#include \"caffe2/operators/sparse_lp_regularizer_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(SparseLpRegularizer, GPUFallbackOp);\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/hip/operator_fallback_gpu.h\"\n#include \"caffe2/operators/sparse_lp_regularizer_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(SparseLpRegularizer, GPUFallbackOp);\n}\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/operator_fallback_gpu.h\"\n#include \"caffe2/operators/sparse_normalize_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(\n SparseNormalize,\n GPUFallbackOp);\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/hip/operator_fallback_gpu.h\"\n#include \"caffe2/operators/sparse_normalize_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(\n SparseNormalize,\n GPUFallbackOp);\n}\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid softplus_kernel(\n TensorIteratorBase& iter,\n const Scalar& beta_,\n const Scalar& threshold_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"softplus_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto beta = beta_.to();\n auto threshold = threshold_.to();\n gpu_kernel(iter, [beta, threshold] GPU_LAMBDA(scalar_t a) -> scalar_t {\n opmath_t aop = static_cast(a);\n return (aop * beta) > threshold\n ? aop\n : (::log1p(std::exp(aop * beta))) / beta;\n });\n });\n}\n\nvoid softplus_backward_kernel(\n TensorIteratorBase& iter,\n const Scalar& beta_,\n const Scalar& threshold_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"softplus_backward_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto beta = beta_.to();\n auto threshold = threshold_.to();\n gpu_kernel(\n iter,\n [beta, threshold] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n opmath_t z = std::exp(bop * beta);\n return (bop * beta) > threshold ? aop\n : aop * z / (z + opmath_t(1.));\n });\n });\n}\n\n} // namespace\n\nREGISTER_DISPATCH(softplus_stub, &softplus_kernel);\nREGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid softplus_kernel(\n TensorIteratorBase& iter,\n const Scalar& beta_,\n const Scalar& threshold_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"softplus_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto beta = beta_.to();\n auto threshold = threshold_.to();\n gpu_kernel(iter, [beta, threshold] GPU_LAMBDA(scalar_t a) -> scalar_t {\n opmath_t aop = static_cast(a);\n return (aop * beta) > threshold\n ? aop\n : (::log1p(::exp(aop * beta))) / beta;\n });\n });\n}\n\nvoid softplus_backward_kernel(\n TensorIteratorBase& iter,\n const Scalar& beta_,\n const Scalar& threshold_) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"softplus_backward_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n auto beta = beta_.to();\n auto threshold = threshold_.to();\n gpu_kernel(\n iter,\n [beta, threshold] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n opmath_t aop = static_cast(a);\n opmath_t bop = static_cast(b);\n opmath_t z = ::exp(bop * beta);\n return (bop * beta) > threshold ? 
aop\n : aop * z / (z + opmath_t(1.));\n });\n });\n}\n\n} // namespace\n\nREGISTER_DISPATCH(softplus_stub, &softplus_kernel);\nREGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/operators/sparse_to_dense_op.h\"\n\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/utils/GpuAtomics.cuh\"\n\nnamespace caffe2 {\n\n template \n __global__ void SparseToDenseKernel(\n size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n int idx = indices[i / block_nitems];\n int dst_idx = block_nitems * idx + i % block_nitems;\n gpu_atomic_add(&dst[dst_idx], vals[i]);\n }\n }\n\n template <>\n bool SparseToDenseOp::RunOnDevice() {\n return DispatchHelper>::call(\n this, Input(INDICES));\n }\n\n template <>\n template \n bool SparseToDenseOp::DoRunWithType() {\n return DispatchHelper<\n TensorTypes2<\n float,\n int32_t>,\n TInd>::call(this, Input(VALUES));\n }\n\n template <>\n template \n bool SparseToDenseOp::DoRunWithType2() {\n auto& sparse_indices = Input(INDICES);\n CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);\n auto& sparse_values = Input(VALUES);\n CAFFE_ENFORCE_GE(sparse_values.dim(), 1);\n CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));\n\n const TInd* sparse_indices_vec = sparse_indices.template data();\n const int32_t sparse_indices_len = sparse_indices.dim32(0);\n const int output_first_dim =\n GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);\n\n auto shape = sparse_values.sizes().vec();\n shape[0] = output_first_dim;\n\n auto* output = Output(0, shape, at::dtype());\n\n TData* output_data = output->template mutable_data();\n math::Set(output->numel(), TData(0), output_data, &context_);\n\n const auto block_nitems = sparse_values.size_from_dim(1);\n const TData* sparse_values_vec = sparse_values.template data();\n\n size_t N = block_nitems * sparse_indices_len;\n CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);\n SparseToDenseKernel<<<\n CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,\n context_.cuda_stream()>>>(\n N,\n block_nitems,\n sparse_indices_vec,\n sparse_values_vec,\n output_data\n );\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n }\n\n\nREGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/sparse_to_dense_op.h\"\n\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/utils/hip/GpuAtomics.cuh\"\n\nnamespace caffe2 {\n\n template \n __global__ void SparseToDenseKernel(\n size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {\n HIP_1D_KERNEL_LOOP(i, N) {\n int idx = indices[i / block_nitems];\n int dst_idx = block_nitems * idx + i % block_nitems;\n gpu_atomic_add(&dst[dst_idx], vals[i]);\n }\n }\n\n template <>\n bool SparseToDenseOp::RunOnDevice() {\n return DispatchHelper>::call(\n this, Input(INDICES));\n }\n\n template <>\n template \n bool SparseToDenseOp::DoRunWithType() {\n return DispatchHelper<\n TensorTypes2<\n float,\n int32_t>,\n TInd>::call(this, Input(VALUES));\n }\n\n template <>\n template \n bool SparseToDenseOp::DoRunWithType2() {\n auto& sparse_indices = Input(INDICES);\n CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);\n auto& sparse_values = Input(VALUES);\n CAFFE_ENFORCE_GE(sparse_values.dim(), 1);\n CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));\n\n const TInd* sparse_indices_vec = sparse_indices.template data();\n const int32_t sparse_indices_len = sparse_indices.dim32(0);\n const int output_first_dim =\n GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);\n\n auto shape = sparse_values.sizes().vec();\n shape[0] = output_first_dim;\n\n auto* output = Output(0, shape, at::dtype());\n\n TData* output_data = output->template mutable_data();\n math::Set(output->numel(), TData(0), output_data, &context_);\n\n const auto block_nitems = sparse_values.size_from_dim(1);\n const TData* sparse_values_vec = sparse_values.template data();\n\n size_t N = block_nitems * sparse_indices_len;\n CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);\n hipLaunchKernelGGL(( SparseToDenseKernel), \n dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS), 0,\n context_.hip_stream(), \n N,\n block_nitems,\n sparse_indices_vec,\n sparse_values_vec,\n output_data\n );\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n }\n\n\nREGISTER_HIP_OPERATOR(SparseToDense, SparseToDenseOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/spatial_batch_norm_op.h\"\n\n#include \"caffe2/operators/spatial_batch_norm_op_impl.cuh\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(SpatialBN, SpatialBNOp);\nREGISTER_CUDA_OPERATOR(SpatialBNGradient, SpatialBNGradientOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/spatial_batch_norm_op.h\"\n\n#include \"caffe2/operators/hip/spatial_batch_norm_op_impl.cuh\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(SpatialBN, SpatialBNOp);\nREGISTER_HIP_OPERATOR(SpatialBNGradient, SpatialBNGradientOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/sqrt_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Sqrt,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SqrtFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/sqrt_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Sqrt,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SqrtFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/sqr_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(\n Sqr,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SqrFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/sqr_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(\n Sqr,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n SqrFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/stop_gradient.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(StopGradient, StopGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/stop_gradient.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(StopGradient, StopGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/stump_func_op.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void StumpFuncKernel(\n const int N,\n const TIN threshold,\n const TOUT low_value,\n const TOUT high_value,\n const TIN* X,\n TOUT* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = (X[i] <= threshold) ? low_value : high_value;\n }\n}\n\n} //\n\ntemplate <>\nbool StumpFuncOp::RunOnDevice() {\n auto& in = Input(0);\n const float* in_data = in.data();\n\n auto* out = Output(0, in.sizes(), at::dtype());\n float* out_data = out->template mutable_data();\n StumpFuncKernel<<>>(\n in.numel(), threshold_, low_value_, high_value_, in_data, out_data);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(StumpFunc, StumpFuncOp);\n// NO_GRADIENT(StumpFuncGpu);\n\n} // caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/stump_func_op.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void StumpFuncKernel(\n const int N,\n const TIN threshold,\n const TOUT low_value,\n const TOUT high_value,\n const TIN* X,\n TOUT* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = (X[i] <= threshold) ? low_value : high_value;\n }\n}\n\n} //\n\ntemplate <>\nbool StumpFuncOp::RunOnDevice() {\n auto& in = Input(0);\n const float* in_data = in.data();\n\n auto* out = Output(0, in.sizes(), at::dtype());\n float* out_data = out->template mutable_data();\n hipLaunchKernelGGL(( StumpFuncKernel), dim3(CAFFE_GET_BLOCKS(in.numel())), dim3(CAFFE_HIP_NUM_THREADS),\n 0, context_.hip_stream(), \n in.numel(), threshold_, low_value_, high_value_, in_data, out_data);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(StumpFunc, StumpFuncOp);\n// NO_GRADIENT(StumpFuncGpu);\n\n} // caffe2\n###" }, { "cuda": "\n#include \n#include \n#include \n#include \n#include \"caffe2/operators/summarize_op.h\"\n#include \"caffe2/core/context_gpu.h\"\nnamespace caffe2 {\nnamespace {\n\n\ntemplate \nstruct SummaryStatsData {\n T n;\n T min;\n T max;\n T mean;\n T M2;\n \n void initialize() {\n n = mean = M2 = 0;\n min = std::numeric_limits::max();\n max = std::numeric_limits::min();\n }\n T variance() { return (n == 1 ? 
0 : M2 / (n - 1)); }\n};\n\n\ntemplate \nstruct summary_stats_unary_op {\n __host__ __device__ SummaryStatsData operator()(const T& x) const {\n SummaryStatsData result;\n result.n = 1;\n result.min = x;\n result.max = x;\n result.mean = x;\n result.M2 = 0;\n return result;\n }\n};\n\n\n\n\ntemplate \nstruct summary_stats_binary_op\n : public thrust::binary_function&, const SummaryStatsData&, SummaryStatsData > {\n __host__ __device__ SummaryStatsData operator()(\n const SummaryStatsData& x, const SummaryStatsData & y) const {\n SummaryStatsData result;\n T n = x.n + y.n;\n T delta = y.mean - x.mean;\n T delta2 = delta * delta;\n result.n = n;\n result.min = thrust::min(x.min, y.min);\n result.max = thrust::max(x.max, y.max);\n result.mean = x.mean + delta * y.n / n;\n result.M2 = x.M2 + y.M2;\n result.M2 += delta2 * x.n * y.n / n;\n return result;\n }\n};\n} \ntemplate<>\nbool SummarizeOp::RunOnDevice() {\n auto& X = Input(0);\n const int N = X.numel();\n TORCH_DCHECK_GT(N, 0);\n \n thrust::device_ptr Xdata(const_cast(X.data()));\n summary_stats_unary_op unary_op;\n summary_stats_binary_op binary_op;\n SummaryStatsData init;\n init.initialize();\n \n SummaryStatsData result = thrust::transform_reduce(\n#if THRUST_VERSION >= 100800\n thrust::cuda::par.on(context_.cuda_stream()),\n#endif\n Xdata, Xdata + N, unary_op, init, binary_op);\n float standard_deviation = std::sqrt(result.variance());\n if (to_file_) {\n (*log_file_) << result.min << \" \" << result.max << \" \" << result.mean << \" \"\n << standard_deviation << std::endl;\n }\n if (OutputSize()) {\n auto* Y = Output(0, {4}, at::dtype());\n float output_buffer[NUM_STATS] = {result.min, result.max, result.mean, standard_deviation};\n context_.CopyFromCPU(\n NUM_STATS, output_buffer, Y->template mutable_data());\n }\n return true;\n}\nREGISTER_CUDA_OPERATOR(Summarize, SummarizeOp);\n} \n\n###", "hip": " \n#include \n#include \n#include \n#include \n#include \"caffe2/operators/summarize_op.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\nnamespace caffe2 {\nnamespace {\n\n\ntemplate \nstruct SummaryStatsData {\n T n;\n T min;\n T max;\n T mean;\n T M2;\n \n void initialize() {\n n = mean = M2 = 0;\n min = std::numeric_limits::max();\n max = std::numeric_limits::min();\n }\n T variance() { return (n == 1 ?
0 : M2 / (n - 1)); }\n};\n\n\ntemplate \nstruct summary_stats_unary_op {\n __host__ __device__ SummaryStatsData operator()(const T& x) const {\n SummaryStatsData result;\n result.n = 1;\n result.min = x;\n result.max = x;\n result.mean = x;\n result.M2 = 0;\n return result;\n }\n};\n\n\n\n\ntemplate \nstruct summary_stats_binary_op\n : public thrust::binary_function&, const SummaryStatsData&, SummaryStatsData > {\n __host__ __device__ SummaryStatsData operator()(\n const SummaryStatsData& x, const SummaryStatsData & y) const {\n SummaryStatsData result;\n T n = x.n + y.n;\n T delta = y.mean - x.mean;\n T delta2 = delta * delta;\n result.n = n;\n result.min = thrust::min(x.min, y.min);\n result.max = thrust::max(x.max, y.max);\n result.mean = x.mean + delta * y.n / n;\n result.M2 = x.M2 + y.M2;\n result.M2 += delta2 * x.n * y.n / n;\n return result;\n }\n};\n} \ntemplate<>\nbool SummarizeOp::RunOnDevice() {\n auto& X = Input(0);\n const int N = X.numel();\n TORCH_DCHECK_GT(N, 0);\n \n thrust::device_ptr Xdata(const_cast(X.data()));\n summary_stats_unary_op unary_op;\n summary_stats_binary_op binary_op;\n SummaryStatsData init;\n init.initialize();\n \n SummaryStatsData result = thrust::transform_reduce(\n#if THRUST_VERSION >= 100800\n thrust::hip::par.on(context_.hip_stream()),\n#endif\n Xdata, Xdata + N, unary_op, init, binary_op);\n float standard_deviation = std::sqrt(result.variance());\n if (to_file_) {\n (*log_file_) << result.min << \" \" << result.max << \" \" << result.mean << \" \"\n << standard_deviation << std::endl;\n }\n if (OutputSize()) {\n auto* Y = Output(0, {4}, at::dtype());\n float output_buffer[NUM_STATS] = {result.min, result.max, result.mean, standard_deviation};\n context_.CopyFromCPU(\n NUM_STATS, output_buffer, Y->template mutable_data());\n }\n return true;\n}\nREGISTER_HIP_OPERATOR(Summarize, SummarizeOp);\n} ###" }, { "cuda": "\n#include \"caffe2/operators/swish_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void SwishCUDAKernel(const int N, const T* X, T* Y) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));\n#else\n Y[i] = X[i] / (T(1) + exp(-X[i]));\n#endif\n }\n}\n\ntemplate \n__global__ void SwishGradientCUDAKernel(\n const int N,\n const T* X,\n const T* Y,\n const T* dY,\n T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) *\n (__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));\n#else\n dX[i] = dY[i] * (Y[i] + (T(1) - Y[i]) / (T(1) + exp(-X[i])));\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool SwishFunctor::\noperator()(const int N, const T* X, T* Y, CUDAContext* context) const {\n SwishCUDAKernel\n <<cuda_stream()>>>(N, X, Y);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\ntemplate \nbool SwishGradientOp::DoRunWithType() {\n auto& Xin = Input(X);\n auto& Yin = Input(Y);\n auto& DYin = Input(DY);\n auto* DXout = Output(DX);\n CAFFE_ENFORCE_EQ(Xin.size(), Yin.size());\n CAFFE_ENFORCE_EQ(DYin.size(), Yin.size());\n DXout->ResizeLike(Yin);\n\n const int n = Xin.size();\n const T* x = Xin.template data();\n const T* y = Yin.template data();\n const T* dy = DYin.template data();\n T* dx = DXout->template mutable_data();\n SwishGradientCUDAKernel\n <<>>(n, x, y, dy, dx);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool SwishGradientOp::RunOnDevice() {\n return DispatchHelper>::call(this,
Input(X));\n}\n\nREGISTER_CUDA_OPERATOR(\n Swish,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n SwishFunctor>);\nREGISTER_CUDA_OPERATOR(SwishGradient, SwishGradientOp);\n\n} // namespace caffe2\n\n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/swish_op.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void SwishHIPKernel(const int N, const T* X, T* Y) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));\n#else\n Y[i] = X[i] / (T(1) + exp(-X[i]));\n#endif\n }\n}\ntemplate \n__global__ void SwishGradientHIPKernel(\n const int N, const T* X, const T* Y, const T* dY, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) *\n (__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));\n#else\n dX[i] = dY[i] * (Y[i] + (T(1) - Y[i]) / (T(1) + exp(-X[i])));\n#endif\n }\n}\n} \ntemplate <>\ntemplate \nbool SwishFunctor::\noperator()(const int N, const T* X, T* Y, HIPContext* context) const {\n hipLaunchKernelGGL(( SwishHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(), N, X, Y);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n return true;\n}\ntemplate <>\ntemplate \nbool SwishGradientOp::DoRunWithType() {\n auto& Xin = Input(X);\n auto& Yin = Input(Y);\n auto& DYin = Input(DY);\n auto* DXout = Output(DX);\n CAFFE_ENFORCE_EQ(Xin.size(), Yin.size());\n CAFFE_ENFORCE_EQ(DYin.size(), Yin.size());\n DXout->ResizeLike(Yin);\n const int n = Xin.size();\n const T* x = Xin.template data();\n const T* y = Yin.template data();\n const T* dy = DYin.template data();\n T* dx = DXout->template mutable_data();\n hipLaunchKernelGGL(( SwishGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), n, x, y, dy, dx);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n return true;\n}\ntemplate <>\nbool SwishGradientOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(X));\n}\nREGISTER_HIP_OPERATOR(\n Swish, UnaryElementwiseOp<\n TensorTypes, HIPContext, SwishFunctor>);\nREGISTER_HIP_OPERATOR(SwishGradient, SwishGradientOp);\n} ###" }, { "cuda": "\n#include \"caffe2/operators/tanh_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nTanhGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * (T(1) - __ldg(Y + i) * __ldg(Y + i));\n#else\n dX[i] = dY[i] * (T(1) - Y[i] * Y[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool TanhGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n TanhGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, Y, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Tanh,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n TanhFunctor>);\nREGISTER_CUDA_OPERATOR(\n TanhGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n TanhGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/tanh_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \n__global__ void\nTanhGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) * (T(1) - __ldg(Y + i) * __ldg(Y + i));\n#else\n dX[i] = dY[i] * (T(1) - Y[i] * Y[i]);\n#endif\n }\n}\n\n} // namespace\n\ntemplate <>\ntemplate \nbool TanhGradientFunctor::Forward(\n const std::vector& Y_dims,\n const std::vector& /* dY_dims */,\n const T* Y,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( TanhGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, Y, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Tanh,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n TanhFunctor>);\nREGISTER_HIP_OPERATOR(\n TanhGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n TanhGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/tan_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\ntemplate \ninline __host__ __device__ T Square(const T& x) {\n return x * x;\n}\n\ntemplate \n__global__ void\nTanGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n#if __CUDA_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / Square(cos(__ldg(X + i)));\n#else\n dX[i] = dY[i] / Square(cos(X[i]));\n#endif\n }\n}\n\ntemplate <>\ntemplate \nbool TanGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n CUDAContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n TanGradientCUDAKernel\n <<cuda_stream()>>>(size, dY, X, dX);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(\n Tan,\n UnaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n TanFunctor>);\nREGISTER_CUDA_OPERATOR(\n TanGradient,\n BinaryElementwiseOp<\n TensorTypes,\n CUDAContext,\n TanGradientFunctor>);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/operators/tan_op.h\"\n\n#include \n#include \n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\ntemplate \ninline __host__ __device__ T Square(const T& x) {\n return x * x;\n}\n\ntemplate \n__global__ void\nTanGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n#if __HIP_ARCH__ >= 350\n dX[i] = __ldg(dY + i) / Square(cos(__ldg(X + i)));\n#else\n dX[i] = dY[i] / Square(cos(X[i]));\n#endif\n }\n}\n\ntemplate <>\ntemplate \nbool TanGradientFunctor::Forward(\n const std::vector& X_dims,\n const std::vector& /* dY_dims */,\n const T* X,\n const T* dY,\n T* dX,\n HIPContext* context) const {\n const int size = std::accumulate(\n X_dims.cbegin(), X_dims.cend(), 1, std::multiplies());\n hipLaunchKernelGGL(( TanGradientHIPKernel)\n , dim3(CAFFE_GET_BLOCKS(size)),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context->hip_stream(), size, dY, X, dX);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(\n Tan,\n UnaryElementwiseOp<\n TensorTypes,\n HIPContext,\n TanFunctor>);\nREGISTER_HIP_OPERATOR(\n TanGradient,\n BinaryElementwiseOp<\n TensorTypes,\n HIPContext,\n TanGradientFunctor>);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"softshrink_cuda\",\n [&]() {\n auto lambd = value.to();\n gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));\n });\n });\n}\n\nvoid shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"shrink_backward_cuda\",\n [&]() {\n auto lambd = value.to();\n gpu_kernel(\n iter,\n [lambd] GPU_LAMBDA(\n scalar_t grad_val, scalar_t self_val) -> scalar_t {\n return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0)\n : grad_val;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);\nREGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\nvoid softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"softshrink_hip\",\n [&]() {\n auto lambd = value.to();\n gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return a > lambd ? a - lambd : (a < -lambd ? 
a + lambd : scalar_t(0));\n });\n });\n}\n\nvoid shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"shrink_backward_hip\",\n [&]() {\n auto lambd = value.to();\n gpu_kernel(\n iter,\n [lambd] GPU_LAMBDA(\n scalar_t grad_val, scalar_t self_val) -> scalar_t {\n return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0)\n : grad_val;\n });\n });\n}\n} // namespace\n\nREGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);\nREGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/tensor_protos_db_input.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(TensorProtosDBInput, TensorProtosDBInput);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/tensor_protos_db_input.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(TensorProtosDBInput, TensorProtosDBInput);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/thresholded_relu_op.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n Y[i] = X[i] > alpha_ ? X[i] : 0;\n }\n}\n\ntemplate \n__global__ void\nThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n dX[i] = Y[i] > 0 ? dY[i] : 0;\n }\n}\n} // namespace\n\ntemplate <>\nbool ThresholdedReluOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n ThresholdedReluKernel<<<\n CAFFE_GET_BLOCKS(X.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n X.numel(), X.data(), Y->template mutable_data(), alpha_);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool ThresholdedReluGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n CAFFE_ENFORCE_GT(Y.numel(), 0);\n CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n ThresholdedReluGradientKernel<<<\n CAFFE_GET_BLOCKS(Y.numel()),\n CAFFE_CUDA_NUM_THREADS,\n 0,\n context_.cuda_stream()>>>(\n Y.numel(),\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_CUDA_OPERATOR(ThresholdedRelu, ThresholdedReluOp);\nREGISTER_CUDA_OPERATOR(\n ThresholdedReluGradient,\n ThresholdedReluGradientOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/thresholded_relu_op.h\"\n\nnamespace caffe2 {\nnamespace {\ntemplate \n__global__ void ThresholdedReluKernel(const int N, const T* X, T* Y, T alpha_) {\n HIP_1D_KERNEL_LOOP(i, N) {\n Y[i] = X[i] > alpha_ ? X[i] : 0;\n }\n}\n\ntemplate \n__global__ void\nThresholdedReluGradientKernel(const int N, const T* Y, const T* dY, T* dX) {\n HIP_1D_KERNEL_LOOP(i, N) {\n dX[i] = Y[i] > 0 ? 
dY[i] : 0;\n }\n}\n} // namespace\n\ntemplate <>\nbool ThresholdedReluOp::RunOnDevice() {\n auto& X = Input(0);\n\n CAFFE_ENFORCE_GT(X.numel(), 0);\n auto* Y = Output(0, X.sizes(), at::dtype());\n hipLaunchKernelGGL(( ThresholdedReluKernel), \n dim3(CAFFE_GET_BLOCKS(X.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n X.numel(), X.data(), Y->template mutable_data(), alpha_);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\ntemplate <>\nbool ThresholdedReluGradientOp::RunOnDevice() {\n auto& Y = Input(0);\n auto& dY = Input(1);\n\n CAFFE_ENFORCE_GT(Y.numel(), 0);\n CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());\n auto* dX = Output(0, Y.sizes(), at::dtype());\n hipLaunchKernelGGL(( ThresholdedReluGradientKernel), \n dim3(CAFFE_GET_BLOCKS(Y.numel())),\n dim3(CAFFE_HIP_NUM_THREADS),\n 0,\n context_.hip_stream(), \n Y.numel(),\n Y.data(),\n dY.data(),\n dX->template mutable_data());\n C10_HIP_KERNEL_LAUNCH_CHECK();\n\n return true;\n}\n\nREGISTER_HIP_OPERATOR(ThresholdedRelu, ThresholdedReluOp);\nREGISTER_HIP_OPERATOR(\n ThresholdedReluGradient,\n ThresholdedReluGradientOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/transpose_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(Transpose, TransposeOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/transpose_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(Transpose, TransposeOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n\n#include \"caffe2/operators/unique_ops.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"caffe2/core/context_gpu.h\"\nnamespace caffe2 {\n#if THRUST_VERSION >= 100800\nnamespace {\n__global__ void remap_kernel(\n thrust::device_ptr second_order, thrust::device_ptr order, int* output, int N, int K) {\n int i = blockDim.x * blockIdx.x + threadIdx.x;\n if (i >= K)\n return;\n int idx = second_order[i];\n output[order[idx]] = i;\n \n for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {\n output[order[idx]] = i;\n }\n return;\n}\n} \ntemplate <>\ntemplate \nbool UniqueOp::DoRunWithType() {\n auto& inputTensor = Input(0);\n \n int N = inputTensor.dim32(0);\n CAFFE_ENFORCE_EQ(inputTensor.dim(), 1, \"Input should be a vector\");\n int* remapping = nullptr;\n if (REMAPPING < OutputSize()) {\n auto* remappingTensor =\n Output(REMAPPING, inputTensor.sizes(), at::dtype());\n remapping = remappingTensor->template mutable_data();\n }\n if (N <= 0) {\n \n Output(UNIQUE, {0}, at::dtype());\n return true;\n }\n const T* input = inputTensor.template data();\n ReinitializeTensor(&thrust_unique_buffer_, {N}, at::dtype().device(CUDA));\n auto* buffer = thrust_unique_buffer_.template mutable_data();\n context_.CopyItemsSameDevice(inputTensor.meta(), N, input, buffer);\n \n thrust::device_vector order1(N), order2(N);\n thrust::sequence(\n thrust::cuda::par.on(context_.cuda_stream()), order1.begin(), order1.end());\n thrust::sequence(\n thrust::cuda::par.on(context_.cuda_stream()), order2.begin(), order2.end());\n \n \n \n \n \n \n \n thrust::sort_by_key(\n thrust::cuda::par.on(context_.cuda_stream()), buffer, buffer + N, order1.begin());\n \n \n \n \n \n \n auto new_last = thrust::unique_by_key(\n thrust::cuda::par.on(context_.cuda_stream()), buffer, buffer + N, order2.begin());\n int K = new_last.first - buffer;\n auto* uniqueTensor = Output(UNIQUE, {K}, 
at::dtype());\n T* unique = uniqueTensor->template mutable_data();\n context_.CopyItemsSameDevice(thrust_unique_buffer_.meta(), K, buffer, unique);\n \n \n \n \n if (remapping != nullptr) {\n \n remap_kernel<<<\n CAFFE_GET_BLOCKS(K), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(\n order2.data(), order1.data(), remapping, N, K);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n }\n return true;\n}\nREGISTER_CUDA_OPERATOR(Unique, UniqueOp);\n#endif \n} \n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n\n#include \"caffe2/operators/unique_ops.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"caffe2/core/hip/context_gpu.h\"\nnamespace caffe2 {\n#if THRUST_VERSION >= 100800\nnamespace {\n__global__ void remap_kernel(\n thrust::device_ptr second_order, thrust::device_ptr order, int* output, int N, int K) {\n int i = blockDim.x * blockIdx.x + threadIdx.x;\n if (i >= K)\n return;\n int idx = second_order[i];\n output[order[idx]] = i;\n \n for (idx++; idx < N && (i == K - 1 || idx != second_order[i + 1]); idx++) {\n output[order[idx]] = i;\n }\n return;\n}\n} \ntemplate <>\ntemplate \nbool UniqueOp::DoRunWithType() {\n auto& inputTensor = Input(0);\n \n int N = inputTensor.dim32(0);\n CAFFE_ENFORCE_EQ(inputTensor.dim(), 1, \"Input should be a vector\");\n int* remapping = nullptr;\n if (REMAPPING < OutputSize()) {\n auto* remappingTensor =\n Output(REMAPPING, inputTensor.sizes(), at::dtype());\n remapping = remappingTensor->template mutable_data();\n }\n if (N <= 0) {\n \n Output(UNIQUE, {0}, at::dtype());\n return true;\n }\n const T* input = inputTensor.template data();\n ReinitializeTensor(&thrust_unique_buffer_, {N}, at::dtype().device(HIP));\n auto* buffer = thrust_unique_buffer_.template mutable_data();\n context_.CopyItemsSameDevice(inputTensor.meta(), N, input, buffer);\n \n thrust::device_vector order1(N), order2(N);\n thrust::sequence(\n thrust::hip::par.on(context_.hip_stream()), order1.begin(), order1.end());\n thrust::sequence(\n thrust::hip::par.on(context_.hip_stream()), order2.begin(), order2.end());\n \n \n \n \n \n \n \n thrust::sort_by_key(\n thrust::hip::par.on(context_.hip_stream()), buffer, buffer + N, order1.begin());\n \n \n \n \n \n \n auto new_last = thrust::unique_by_key(\n thrust::hip::par.on(context_.hip_stream()), buffer, buffer + N, order2.begin());\n int K = new_last.first - buffer;\n auto* uniqueTensor = Output(UNIQUE, {K}, at::dtype());\n T* unique = uniqueTensor->template mutable_data();\n context_.CopyItemsSameDevice(thrust_unique_buffer_.meta(), K, buffer, unique);\n \n \n \n \n if (remapping != nullptr) {\n \n hipLaunchKernelGGL(( remap_kernel), dim3(CAFFE_GET_BLOCKS(K)), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), order2.data(), order1.data(), remapping, N, K);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n }\n return true;\n}\nREGISTER_HIP_OPERATOR(Unique, UniqueOp);\n#endif \n} ###" }, { "cuda": "\n#include \"caffe2/operators/unsafe_coalesce.h\"\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(UnsafeCoalesce, UnsafeCoalesceOp);\n\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/unsafe_coalesce.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(UnsafeCoalesce, UnsafeCoalesceOp);\n\n}\n###" }, { "cuda": "\n#include \n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/operators/utility_ops.h\"\n#include \n\nC10_DECLARE_string(caffe_test_root);\n\nnamespace caffe2 {\n\nstatic void AddConstInput(\n const vector& shape,\n const float value,\n const string& name,\n Workspace* ws) {\n DeviceOption option;\n option.set_device_type(PROTO_CUDA);\n CUDAContext context(option);\n Blob* blob = ws->CreateBlob(name);\n auto* tensor = BlobGetMutableTensor(blob, CUDA);\n tensor->Resize(shape);\n math::Set(\n tensor->numel(), value, tensor->template mutable_data(), &context);\n return;\n}\n\nTEST(UtilityOpGPUTest, testReshapeWithScalar) {\n if (!HasCudaGPU())\n return;\n Workspace ws;\n OperatorDef def;\n def.set_name(\"test_reshape\");\n def.set_type(\"Reshape\");\n def.add_input(\"X\");\n def.add_output(\"XNew\");\n def.add_output(\"OldShape\");\n def.add_arg()->CopyFrom(MakeArgument(\"shape\", vector{1}));\n def.mutable_device_option()->set_device_type(PROTO_CUDA);\n AddConstInput(vector(), 3.14, \"X\", &ws);\n // execute the op\n unique_ptr op(CreateOperator(def, &ws));\n EXPECT_TRUE(op->Run());\n Blob* XNew = ws.GetBlob(\"XNew\");\n const Tensor& XNewTensor = XNew->Get();\n EXPECT_EQ(1, XNewTensor.dim());\n EXPECT_EQ(1, XNewTensor.numel());\n}\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/operators/utility_ops.h\"\n#include \n\nC10_DECLARE_string(caffe_test_root);\n\nnamespace caffe2 {\n\nstatic void AddConstInput(\n const vector& shape,\n const float value,\n const string& name,\n Workspace* ws) {\n DeviceOption option;\n option.set_device_type(PROTO_HIP);\n HIPContext context(option);\n Blob* blob = ws->CreateBlob(name);\n auto* tensor = BlobGetMutableTensor(blob, HIP);\n tensor->Resize(shape);\n math::Set(\n tensor->numel(), value, tensor->template mutable_data(), &context);\n return;\n}\n\nTEST(UtilityOpGPUTest, testReshapeWithScalar) {\n if (!HasHipGPU())\n return;\n Workspace ws;\n OperatorDef def;\n def.set_name(\"test_reshape\");\n def.set_type(\"Reshape\");\n def.add_input(\"X\");\n def.add_output(\"XNew\");\n def.add_output(\"OldShape\");\n def.add_arg()->CopyFrom(MakeArgument(\"shape\", vector{1}));\n def.mutable_device_option()->set_device_type(PROTO_HIP);\n AddConstInput(vector(), 3.14, \"X\", &ws);\n // execute the op\n unique_ptr op(CreateOperator(def, &ws));\n EXPECT_TRUE(op->Run());\n Blob* XNew = ws.GetBlob(\"XNew\");\n const Tensor& XNewTensor = XNew->Get();\n EXPECT_EQ(1, XNewTensor.dim());\n EXPECT_EQ(1, XNewTensor.numel());\n}\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/operators/while_op.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(While, WhileOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/operators/while_op.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(While, WhileOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/zero_gradient_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(ZeroGradient, ZeroGradientOp);\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/zero_gradient_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(ZeroGradient, ZeroGradientOp);\n}\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/rnn/recurrent_network_blob_fetcher_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(\n RecurrentNetworkBlobFetcher,\n RecurrentNetworkBlobFetcherOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/rnn/recurrent_network_blob_fetcher_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(\n RecurrentNetworkBlobFetcher,\n RecurrentNetworkBlobFetcherOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_\n#define CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_\n\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/rnn/recurrent_network_executor.h\"\n\n\n#include \n\nnamespace caffe2 {\n\nclass CUDARecurrentNetworkExecutor : public RecurrentNetworkExecutorBase {\n public:\n CUDARecurrentNetworkExecutor(\n const NetDef& step_net_def,\n std::map& recurrent_input_map,\n std::string timestep_blob)\n : RecurrentNetworkExecutorBase(step_net_def, recurrent_input_map, timestep_blob) {}\n\n ~CUDARecurrentNetworkExecutor();\n\n protected:\n bool Run(int T) override;\n\n bool RunBackwards(int T) override;\n\n bool ignoreLinkDependencies() override {\n return true;\n }\n\n void AnalyzeOps() override {\n /**\n * Check if there is an op that only depends on ops from previous\n * timestep, and that ops is not the last op. Then we can start computation\n * in subsequent timesteps before the whole previous timestep has finished.\n * If there is no parallelism, we can avoid overhead of event-based\n * dependency management.\n */\n has_timestep_parallelism_ = false;\n for (auto& rnn_op : timestep_ops_template_) {\n int i = rnn_op.order;\n if (rnn_op.parents.size() >= 1 && i < timestep_ops_template_.size() - 1) {\n bool only_recurrent_deps = std::all_of(\n rnn_op.parents.begin(),\n rnn_op.parents.end(), [&](const int &parent) {\n return parent > i;\n }\n );\n if (only_recurrent_deps) {\n VLOG(1) << \"Timestep parallel op: \" << ProtoDebugString(step_net_def_.op(i));\n has_timestep_parallelism_ = true;\n\n for (int dep : rnn_op.parents) {\n if (dep == timestep_ops_template_.size() - 1) {\n // This op depends on the last op of the previous iteration,\n // so it will block any parallelism\n has_timestep_parallelism_ = false;\n break;\n }\n }\n break;\n }\n }\n }\n LOG(INFO) << \"Analyzed ops for timestep parallelism: \" << has_timestep_parallelism_;\n }\n\n public:\n\n void setMaxStreams(int n) {\n max_cuda_streams_ = n;\n }\n\n private:\n void _ExecRange(int from, int to);\n\n std::vector events_;\n bool has_timestep_parallelism_ = false;\n int max_cuda_streams_ = 2;\n};\n}\n#endif\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_\n#define CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_\n\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/rnn/recurrent_network_executor.h\"\n\n\n#include \n\nnamespace caffe2 {\n\nclass HIPRecurrentNetworkExecutor : public RecurrentNetworkExecutorBase {\n public:\n HIPRecurrentNetworkExecutor(\n const NetDef& step_net_def,\n std::map& recurrent_input_map,\n std::string timestep_blob)\n : RecurrentNetworkExecutorBase(step_net_def, recurrent_input_map, timestep_blob) {}\n\n ~HIPRecurrentNetworkExecutor();\n\n protected:\n bool Run(int T) override;\n\n bool RunBackwards(int T) override;\n\n bool ignoreLinkDependencies() override {\n return true;\n }\n\n void AnalyzeOps() override {\n /**\n * Check if there is an op that only depends on ops from previous\n * timestep, and that ops is not the last op. Then we can start computation\n * in subsequent timesteps before the whole previous timestep has finished.\n * If there is no parallelism, we can avoid overhead of event-based\n * dependency management.\n */\n has_timestep_parallelism_ = false;\n for (auto& rnn_op : timestep_ops_template_) {\n int i = rnn_op.order;\n if (rnn_op.parents.size() >= 1 && i < timestep_ops_template_.size() - 1) {\n bool only_recurrent_deps = std::all_of(\n rnn_op.parents.begin(),\n rnn_op.parents.end(), [&](const int &parent) {\n return parent > i;\n }\n );\n if (only_recurrent_deps) {\n VLOG(1) << \"Timestep parallel op: \" << ProtoDebugString(step_net_def_.op(i));\n has_timestep_parallelism_ = true;\n\n for (int dep : rnn_op.parents) {\n if (dep == timestep_ops_template_.size() - 1) {\n // This op depends on the last op of the previous iteration,\n // so it will block any parallelism\n has_timestep_parallelism_ = false;\n break;\n }\n }\n break;\n }\n }\n }\n LOG(INFO) << \"Analyzed ops for timestep parallelism: \" << has_timestep_parallelism_;\n }\n\n public:\n\n void setMaxStreams(int n) {\n max_hip_streams_ = n;\n }\n\n private:\n void _ExecRange(int from, int to);\n\n std::vector events_;\n bool has_timestep_parallelism_ = false;\n int max_hip_streams_ = 2;\n};\n}\n#endif\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\ntemplate \nvoid threshold_kernel_impl(\n TensorIteratorBase& iter,\n scalar_t threshold,\n scalar_t value) {\n gpu_kernel_with_scalars(\n iter, [=] GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {\n return x <= threshold ? value : other;\n });\n}\n\nstatic void threshold_kernel_cuda(\n TensorIteratorBase& iter,\n const Scalar& threshold,\n const Scalar& value) {\n AT_DISPATCH_ALL_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"threshold_cuda\",\n [&] {\n threshold_kernel_impl(\n iter, threshold.to(), value.to());\n });\n}\n\n} // namespace\n\nREGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#define _USE_MATH_DEFINES\n\n#include \n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\n\ntemplate \nvoid threshold_kernel_impl(\n TensorIteratorBase& iter,\n scalar_t threshold,\n scalar_t value) {\n gpu_kernel_with_scalars(\n iter, [=] GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {\n return x <= threshold ? value : other;\n });\n}\n\nstatic void threshold_kernel_hip(\n TensorIteratorBase& iter,\n const Scalar& threshold,\n const Scalar& value) {\n AT_DISPATCH_ALL_TYPES_AND2(\n at::ScalarType::Half,\n at::ScalarType::BFloat16,\n iter.dtype(),\n \"threshold_hip\",\n [&] {\n threshold_kernel_impl(\n iter, threshold.to(), value.to());\n });\n}\n\n} // namespace\n\nREGISTER_DISPATCH(threshold_stub, &threshold_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/rnn/recurrent_network_op.h\"\n\nnamespace caffe2 {\n\nnamespace detail {\n\ntemplate \nvoid initializeRecurrentInput(\n const RecurrentInput& rc,\n int32_t seqLen,\n int32_t batchSize,\n Workspace* ws,\n Context* context);\n\nnamespace {\n\ntemplate \n__global__\nvoid initRecurrentInput_kernel(\n size_t stateSize,\n const T* input,\n T* state) {\n // index into appropriate target buffer\n const int block_id = blockIdx.x;\n T* state_local = state + block_id*stateSize;\n\n // copy\n for (int idx=threadIdx.x; idx < stateSize; idx+=blockDim.x) {\n state_local[idx] = input[idx];\n }\n}\n\n\n}; // namespace\n\ntemplate <>\nvoid repeatCopy(\n size_t repeat_n,\n size_t n,\n const float* src,\n float* dst,\n CUDAContext* context) {\n initRecurrentInput_kernel<<cuda_stream()>>>(\n n, src, dst);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\ntemplate <>\nvoid repeatCopy(\n size_t repeat_n,\n size_t n,\n const at::Half* src,\n at::Half* dst,\n CUDAContext* context) {\n initRecurrentInput_kernel<<cuda_stream()>>>(\n n, src, dst);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\n}; // namespace detail\n\ntemplate <>\nbool RecurrentNetworkOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\ntemplate <>\nbool RecurrentNetworkGradientOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\ntemplate <>\nbool AccumulateInputGradientOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(1));\n}\n\ntemplate <>\nbool RNNApplyLinkOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(1));\n}\n\nREGISTER_CUDA_OPERATOR(\n RecurrentNetwork,\n RecurrentNetworkOp);\nREGISTER_CUDA_OPERATOR(\n RecurrentNetworkGradient,\n RecurrentNetworkGradientOp);\nREGISTER_CUDA_OPERATOR(\n rnn_internal_accumulate_gradient_input,\n AccumulateInputGradientOp);\nREGISTER_CUDA_OPERATOR(\n rnn_internal_apply_link,\n RNNApplyLinkOp);\n\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/operators/rnn/recurrent_network_op.h\"\n\nnamespace caffe2 {\n\nnamespace detail {\n\ntemplate \nvoid initializeRecurrentInput(\n const RecurrentInput& rc,\n int32_t seqLen,\n int32_t batchSize,\n Workspace* ws,\n Context* context);\n\nnamespace {\n\ntemplate \n__global__\nvoid initRecurrentInput_kernel(\n size_t stateSize,\n const T* input,\n T* state) {\n // index into appropriate target buffer\n const int block_id = blockIdx.x;\n T* state_local = state + block_id*stateSize;\n\n // copy\n for (int idx=threadIdx.x; idx < stateSize; idx+=blockDim.x) {\n state_local[idx] = input[idx];\n }\n}\n\n\n}; // namespace\n\ntemplate <>\nvoid repeatCopy(\n size_t repeat_n,\n size_t n,\n const float* src,\n float* dst,\n HIPContext* context) {\n hipLaunchKernelGGL(( initRecurrentInput_kernel), dim3(repeat_n), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(), \n n, src, dst);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\ntemplate <>\nvoid repeatCopy(\n size_t repeat_n,\n size_t n,\n const at::Half* src,\n at::Half* dst,\n HIPContext* context) {\n hipLaunchKernelGGL(( initRecurrentInput_kernel), dim3(repeat_n), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(), \n n, src, dst);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\n}; // namespace detail\n\ntemplate <>\nbool RecurrentNetworkOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\ntemplate <>\nbool RecurrentNetworkGradientOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(0));\n}\n\ntemplate <>\nbool AccumulateInputGradientOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(1));\n}\n\ntemplate <>\nbool RNNApplyLinkOp::RunOnDevice() {\n return DispatchHelper>::call(this, Input(1));\n}\n\nREGISTER_HIP_OPERATOR(\n RecurrentNetwork,\n RecurrentNetworkOp);\nREGISTER_HIP_OPERATOR(\n RecurrentNetworkGradient,\n RecurrentNetworkGradientOp);\nREGISTER_HIP_OPERATOR(\n rnn_internal_accumulate_gradient_input,\n AccumulateInputGradientOp);\nREGISTER_HIP_OPERATOR(\n rnn_internal_apply_link,\n RNNApplyLinkOp);\n\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/queue/queue_ops.h\"\n#include \"caffe2/utils/math.h\"\n\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(CreateBlobsQueue, CreateBlobsQueueOp);\nREGISTER_CUDA_OPERATOR(EnqueueBlobs, EnqueueBlobsOp);\nREGISTER_CUDA_OPERATOR(DequeueBlobs, DequeueBlobsOp);\nREGISTER_CUDA_OPERATOR(CloseBlobsQueue, CloseBlobsQueueOp);\n\nREGISTER_CUDA_OPERATOR(SafeEnqueueBlobs, SafeEnqueueBlobsOp);\nREGISTER_CUDA_OPERATOR(SafeDequeueBlobs, SafeDequeueBlobsOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/queue/queue_ops.h\"\n#include \"caffe2/utils/math.h\"\n\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(CreateBlobsQueue, CreateBlobsQueueOp);\nREGISTER_HIP_OPERATOR(EnqueueBlobs, EnqueueBlobsOp);\nREGISTER_HIP_OPERATOR(DequeueBlobs, DequeueBlobsOp);\nREGISTER_HIP_OPERATOR(CloseBlobsQueue, CloseBlobsQueueOp);\n\nREGISTER_HIP_OPERATOR(SafeEnqueueBlobs, SafeEnqueueBlobsOp);\nREGISTER_HIP_OPERATOR(SafeDequeueBlobs, SafeDequeueBlobsOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/sgd/iter_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(Iter, IterOp);\nREGISTER_CUDA_OPERATOR(AtomicIter, AtomicIterOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/sgd/iter_op.h\"\n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(Iter, IterOp);\nREGISTER_HIP_OPERATOR(AtomicIter, AtomicIterOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/sgd/lars_op.h\"\n\nnamespace caffe2 {\n__global__ void ComputeLearningRateKernel(\n const float* wd,\n const float* trust,\n const float* lr_max,\n float offset,\n float lr_min,\n float* X_norm,\n float* dX_norm,\n float* lr_rescaled) {\n float val = 1.0;\n\n if (*X_norm > 0) {\n val = (*trust) / (*dX_norm / *X_norm + (*wd) + offset);\n }\n *lr_rescaled = fmaxf(fminf(val, *lr_max), lr_min);\n}\n\ntemplate <>\nvoid LarsOp::ComputeLearningRate(\n const float* wd,\n const float* trust,\n const float* lr_max,\n float offset,\n float lr_min,\n float* X_norm,\n float* dX_norm,\n float* lr_rescaled) {\n ComputeLearningRateKernel<<<1, 1, 0, context_.cuda_stream()>>>(\n wd, trust, lr_max, offset, lr_min, X_norm, dX_norm, lr_rescaled);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_CUDA_OPERATOR(Lars, LarsOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/sgd/lars_op.h\"\n\nnamespace caffe2 {\n__global__ void ComputeLearningRateKernel(\n const float* wd,\n const float* trust,\n const float* lr_max,\n float offset,\n float lr_min,\n float* X_norm,\n float* dX_norm,\n float* lr_rescaled) {\n float val = 1.0;\n\n if (*X_norm > 0) {\n val = (*trust) / (*dX_norm / *X_norm + (*wd) + offset);\n }\n *lr_rescaled = fmaxf(fminf(val, *lr_max), lr_min);\n}\n\ntemplate <>\nvoid LarsOp::ComputeLearningRate(\n const float* wd,\n const float* trust,\n const float* lr_max,\n float offset,\n float lr_min,\n float* X_norm,\n float* dX_norm,\n float* lr_rescaled) {\n hipLaunchKernelGGL(( ComputeLearningRateKernel), dim3(1), dim3(1), 0, context_.hip_stream(), \n wd, trust, lr_max, offset, lr_min, X_norm, dX_norm, lr_rescaled);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\nREGISTER_HIP_OPERATOR(Lars, LarsOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/sgd/learning_rate_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(LearningRate, LearningRateOp);\n} // namespace caffe2\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/sgd/learning_rate_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(LearningRate, LearningRateOp);\n} // namespace caffe2\n###" }, { "cuda": "\n#include \"caffe2/sgd/rmsprop_op.h\"\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n\nnamespace caffe2 {\n\n__global__ void RmsPropUpdate(\n int N,\n const float* g,\n const float* ms,\n const float* mom,\n float* ng,\n float* nms,\n float* nmom,\n float decay,\n float momentum,\n float epsilon,\n const float* lr) {\n CUDA_1D_KERNEL_LOOP(i, N) {\n // Update new mean square estimate\n nms[i] = ms[i] + (1.0f - decay) * (g[i] * g[i] - ms[i]);\n // Update momentum estimate\n nmom[i] =\n mom[i] * momentum + lr[0] * g[i] / sqrtf(epsilon + nms[i]);\n // New gradient is the momentum\n ng[i] = nmom[i];\n }\n}\n\ntemplate <>\nvoid rmsprop_update(\n int N,\n const float* g,\n const float* ms,\n const float* mom,\n float* ng,\n float* nms,\n float* nmom,\n float decay,\n float momentum,\n float epsilon,\n const float* lr,\n CUDAContext* context) {\n RmsPropUpdate<<cuda_stream()>>>(\n N, g, ms, mom, ng, nms, nmom, decay, momentum, epsilon, lr);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\n\nREGISTER_CUDA_OPERATOR(RmsProp, RmsPropOp);\n\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \"caffe2/sgd/rmsprop_op.h\"\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n\nnamespace caffe2 {\n\n__global__ void RmsPropUpdate(\n int N,\n const float* g,\n const float* ms,\n const float* mom,\n float* ng,\n float* nms,\n float* nmom,\n float decay,\n float momentum,\n float epsilon,\n const float* lr) {\n HIP_1D_KERNEL_LOOP(i, N) {\n // Update new mean square estimate\n nms[i] = ms[i] + (1.0f - decay) * (g[i] * g[i] - ms[i]);\n // Update momentum estimate\n nmom[i] =\n mom[i] * momentum + lr[0] * g[i] / sqrtf(epsilon + nms[i]);\n // New gradient is the momentum\n ng[i] = nmom[i];\n }\n}\n\ntemplate <>\nvoid rmsprop_update(\n int N,\n const float* g,\n const float* ms,\n const float* mom,\n float* ng,\n float* nms,\n float* nmom,\n float decay,\n float momentum,\n float epsilon,\n const float* lr,\n HIPContext* context) {\n hipLaunchKernelGGL(( RmsPropUpdate), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(), \n N, g, ms, mom, ng, nms, nmom, decay, momentum, epsilon, lr);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\n\nREGISTER_HIP_OPERATOR(RmsProp, RmsPropOp);\n\n}\n###" }, { "cuda": "\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/sgd/weight_scale_op.h\"\n\nnamespace caffe2 {\nREGISTER_CUDA_OPERATOR(WeightScale, WeightScaleOp);\n\ntemplate \nvoid weight_scale_update_kernel(\n int N,\n const T* w,\n const T& scale,\n int64_t iter,\n int64_t stepsize,\n int64_t update_upper_bound,\n T* nw,\n CUDAContext* context) {\n const auto w_size = N * sizeof(float);\n if (iter % stepsize != 0 || iter >= update_upper_bound) {\n (void)cudaMemcpy(nw, w, w_size, cudaMemcpyDefault);\n } else {\n // perform the weight scaling\n caffe2::math::Scale(N, scale, w, nw, context);\n }\n}\n\ntemplate <>\ntemplate \nbool WeightScaleOp::DoRunWithType() {\n const auto iter =\n OperatorBase::Input(ITER, CPU).template data()[0] + 1;\n weight_scale_update_kernel(\n Input(WEIGHTS).size(),\n Input(WEIGHTS).template data(),\n scale_,\n iter,\n stepsize_,\n update_upper_bound_,\n 
Output(OUTPUT_WEIGHTS)->template mutable_data(),\n &context_);\n return true;\n}\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"caffe2/core/hip/common_gpu.h\"\n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"caffe2/sgd/weight_scale_op.h\"\n\nnamespace caffe2 {\nREGISTER_HIP_OPERATOR(WeightScale, WeightScaleOp);\n\ntemplate \nvoid weight_scale_update_kernel(\n int N,\n const T* w,\n const T& scale,\n int64_t iter,\n int64_t stepsize,\n int64_t update_upper_bound,\n T* nw,\n HIPContext* context) {\n const auto w_size = N * sizeof(float);\n if (iter % stepsize != 0 || iter >= update_upper_bound) {\n (void)hipMemcpy(nw, w, w_size, hipMemcpyDefault);\n } else {\n // perform the weight scaling\n caffe2::math::Scale(N, scale, w, nw, context);\n }\n}\n\ntemplate <>\ntemplate \nbool WeightScaleOp::DoRunWithType() {\n const auto iter =\n OperatorBase::Input(ITER, CPU).template data()[0] + 1;\n weight_scale_update_kernel(\n Input(WEIGHTS).size(),\n Input(WEIGHTS).template data(),\n scale_,\n iter,\n stepsize_,\n update_upper_bound_,\n Output(OUTPUT_WEIGHTS)->template mutable_data(),\n &context_);\n return true;\n}\n\n} // namespace caffe2\n###" }, { "cuda": "\n#pragma once\n\n// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:\n// https://github.com/NVIDIA/cub/pull/326\n// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake\n// starting from CUDA 11.5\n#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true\n#else\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false\n#endif\n\n#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()\nnamespace caffe2 {\nnamespace cub = ::CUB_WRAPPED_NAMESPACE::cub;\n}\n#endif\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n\n// cub sort support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:\n// https://github.com/NVIDIA/cub/pull/326\n// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake\n// starting from HIP 11.5\n#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true\n#else\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false\n#endif\n\n#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()\nnamespace caffe2 {\nnamespace cub = ::CUB_WRAPPED_NAMESPACE::cub;\n}\n#endif\n###" }, { "cuda": "\n#ifndef CAFFE2_UTILS_GPU_ATOMICS_H_\n#define CAFFE2_UTILS_GPU_ATOMICS_H_\n\n#include \n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \ninline __device__ void gpu_atomic_add(T* address, const T val) {\n atomicAdd(address, val);\n}\n\ntemplate <>\ninline __device__ void gpu_atomic_add(float* address, const float val) {\n#if defined(USE_ROCM) && defined(__gfx908__)\n atomicAddNoRet(address, val);\n#else\n atomicAdd(address, val);\n#endif\n}\n\n} // namespace\n\n} // namespace caffe2\n\n#endif // CAFFE2_UTILS_GPU_ATOMICS_H_\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#ifndef CAFFE2_UTILS_GPU_ATOMICS_H_\n#define CAFFE2_UTILS_GPU_ATOMICS_H_\n\n#include \n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate \ninline __device__ void gpu_atomic_add(T* address, const T val) {\n atomicAdd(address, val);\n}\n\ntemplate <>\ninline __device__ void gpu_atomic_add(float* address, const float val) {\n#if defined(USE_ROCM) && defined(__gfx908__)\n atomicAddNoRet(address, val);\n#else\n atomicAdd(address, val);\n#endif\n}\n\n} // namespace\n\n} // namespace caffe2\n\n#endif // CAFFE2_UTILS_GPU_ATOMICS_H_\n###" }, { "cuda": "\n#ifndef CAFFE2_UTILS_GPU_SCAN_UTILS_H_\n#define CAFFE2_UTILS_GPU_SCAN_UTILS_H_\n#include \"caffe2/utils/GpuDefs.cuh\"\nnamespace caffe2 {\n\n\n\n\ntemplate \n__device__ void inclusivePrefixScan(T* smem, T in, T* out, BinaryFunction binop) {\n \n \n smem[threadIdx.x] = in;\n __syncthreads();\n for (int offset = 1; offset < blockDim.x; offset *= 2) {\n T val = 0;\n if (threadIdx.x >= offset) {\n val = binop(smem[threadIdx.x - offset], smem[threadIdx.x]);\n }\n __syncthreads();\n if (threadIdx.x >= offset) {\n smem[threadIdx.x] = val;\n }\n __syncthreads();\n }\n *out = smem[threadIdx.x];\n \n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\ntemplate \n__device__ void exclusivePrefixScan(T* smem, T in, T* out, T* carry, BinaryFunction binop) {\n \n \n inclusivePrefixScan(smem, in, out, binop);\n *out -= in;\n *carry = smem[blockDim.x - 1];\n \n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n\ntemplate \n__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {\n \n#if defined(USE_ROCM)\n unsigned long long int vote = __ballot(in);\n T index = __popcll(getLaneMaskLe() & vote);\n T carry = __popcll(vote);\n#else\n T vote = __ballot_sync(__activemask(), in);\n T index = __popc(getLaneMaskLe() & vote);\n T carry = __popc(vote);\n#endif \n int warp = threadIdx.x / kWarpSize;\n \n if (getLaneId() == 0) {\n smem[warp] = carry;\n }\n __syncthreads();\n \n \n if (threadIdx.x == 0) {\n int current = 0;\n for (int i = 0; i < blockDim.x / kWarpSize; ++i) {\n T v = smem[i];\n smem[i] = binop(smem[i], current);\n current = binop(current, v);\n }\n }\n __syncthreads();\n \n if (warp >= 1) {\n index = binop(index, smem[warp - 1]);\n }\n *out = index;\n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n\ntemplate \n__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {\n inclusiveBinaryPrefixScan(smem, in, out, binop);\n \n *out -= (T) in;\n \n#if defined(USE_ROCM)\n *carry = smem[math::DivUp(blockDim.x, kWarpSize) - 1];\n#else\n *carry = smem[(blockDim.x / kWarpSize) - 1];\n#endif \n if (KillWARDependency) {\n __syncthreads();\n }\n}\n} \n#endif \n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#ifndef CAFFE2_UTILS_GPU_SCAN_UTILS_H_\n#define CAFFE2_UTILS_GPU_SCAN_UTILS_H_\n#include \"caffe2/utils/hip/GpuDefs.cuh\"\nnamespace caffe2 {\n\n\n\n\ntemplate \n__device__ void inclusivePrefixScan(T* smem, T in, T* out, BinaryFunction binop) {\n \n \n smem[threadIdx.x] = in;\n __syncthreads();\n for (int offset = 1; offset < blockDim.x; offset *= 2) {\n T val = 0;\n if (threadIdx.x >= offset) {\n val = binop(smem[threadIdx.x - offset], smem[threadIdx.x]);\n }\n __syncthreads();\n if (threadIdx.x >= offset) {\n smem[threadIdx.x] = val;\n }\n __syncthreads();\n }\n *out = smem[threadIdx.x];\n \n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\ntemplate \n__device__ void exclusivePrefixScan(T* smem, T in, T* out, T* carry, 
BinaryFunction binop) {\n \n \n inclusivePrefixScan(smem, in, out, binop);\n *out -= in;\n *carry = smem[blockDim.x - 1];\n \n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n\ntemplate \n__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {\n \n#if defined(USE_ROCM)\n unsigned long long int vote = __ballot(in);\n T index = __popcll(getLaneMaskLe() & vote);\n T carry = __popcll(vote);\n#else\n T vote = __ballot_sync(__activemask(), in);\n T index = __popc(getLaneMaskLe() & vote);\n T carry = __popc(vote);\n#endif \n int warp = threadIdx.x / kWarpSize;\n \n if (getLaneId() == 0) {\n smem[warp] = carry;\n }\n __syncthreads();\n \n \n if (threadIdx.x == 0) {\n int current = 0;\n for (int i = 0; i < blockDim.x / kWarpSize; ++i) {\n T v = smem[i];\n smem[i] = binop(smem[i], current);\n current = binop(current, v);\n }\n }\n __syncthreads();\n \n if (warp >= 1) {\n index = binop(index, smem[warp - 1]);\n }\n *out = index;\n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n\ntemplate \n__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {\n inclusiveBinaryPrefixScan(smem, in, out, binop);\n \n *out -= (T) in;\n \n#if defined(USE_ROCM)\n *carry = smem[math::DivUp(blockDim.x, kWarpSize) - 1];\n#else\n *carry = smem[(blockDim.x / kWarpSize) - 1];\n#endif \n if (KillWARDependency) {\n __syncthreads();\n }\n}\n} \n#endif ###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_CUDA char airy_ai_name[] = \"airy_ai_forward\";\n\nvoid airy_ai_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"airy_ai_cuda\", [&]() {\n jitted_gpu_kernel(iterator, airy_ai_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"airy_ai_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return airy_ai_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(special_airy_ai_stub, &airy_ai_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_HIP char airy_ai_name[] = \"airy_ai_forward\";\n\nvoid airy_ai_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"airy_ai_hip\", [&]() {\n jitted_gpu_kernel(iterator, airy_ai_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"airy_ai_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return airy_ai_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(special_airy_ai_stub, &airy_ai_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \nnamespace at {\nnamespace cuda {\nnamespace cub {\nnamespace detail {\ntemplate \nvoid radix_sort_pairs_impl(\n const key_t* keys_in, key_t* keys_out, const OpaqueType* values_in, OpaqueType* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {\n TORCH_CHECK(\n n <= std::numeric_limits::max(), \"cub sort does not support sorting more than INT_MAX elements\");\n using key_t_ = typename detail::cuda_type::type;\n auto allocator = c10::cuda::CUDACachingAllocator::get();\n c10::DataPtr keys_out_owner;\n if (keys_out == nullptr) {\n keys_out_owner = allocator->allocate(n * sizeof(key_t));\n keys_out = reinterpret_cast(keys_out_owner.get());\n }\n const key_t_* keys_in_ = reinterpret_cast(keys_in);\n key_t_* keys_out_ = reinterpret_cast(keys_out);\n if (descending) {\n CUB_WRAPPER(\n NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairsDescending, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());\n } else {\n CUB_WRAPPER(\n NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortPairs, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::cuda::getCurrentCUDAStream());\n }\n}\n#define AT_INSTANTIATE_SORT_PAIRS(key_t, value_size) template void radix_sort_pairs_impl( const key_t* keys_in, key_t* keys_out, const OpaqueType* values_in, OpaqueType* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);\nAT_INSTANTIATE_SORT_PAIRS(int32_t, 1)\nAT_INSTANTIATE_SORT_PAIRS(int32_t, 2)\nAT_INSTANTIATE_SORT_PAIRS(int32_t, 4)\nAT_INSTANTIATE_SORT_PAIRS(int64_t, 1)\nAT_INSTANTIATE_SORT_PAIRS(int64_t, 2)\nAT_INSTANTIATE_SORT_PAIRS(int64_t, 4)\n#define AT_INSTANTIATE_SORT_PAIRS_8(scalar_t, ScalarType) AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)\nAT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)\n\n#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)\nAT_INSTANTIATE_SORT_PAIRS(c10::BFloat16, 8)\n#endif\n} \n} \n} \n} \n\n###", "hip": " \n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \nnamespace at {\nnamespace hip {\nnamespace cub {\nnamespace detail {\ntemplate \nvoid radix_sort_pairs_impl(\n const key_t* keys_in, key_t* keys_out, const OpaqueType* values_in, OpaqueType* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit) {\n TORCH_CHECK(\n n <= std::numeric_limits::max(), \"cub sort does not support sorting more than INT_MAX elements\");\n using key_t_ = typename detail::hip_type::type;\n auto allocator = c10::hip::HIPCachingAllocator::get();\n 
c10::DataPtr keys_out_owner;\n if (keys_out == nullptr) {\n keys_out_owner = allocator->allocate(n * sizeof(key_t));\n keys_out = reinterpret_cast(keys_out_owner.get());\n }\n const key_t_* keys_in_ = reinterpret_cast(keys_in);\n key_t_* keys_out_ = reinterpret_cast(keys_out);\n if (descending) {\n CUB_WRAPPER(\n NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortPairsDescending, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::hip::getCurrentHIPStream());\n } else {\n CUB_WRAPPER(\n NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortPairs, keys_in_, keys_out_, values_in, values_out, n, begin_bit, end_bit, c10::hip::getCurrentHIPStream());\n }\n}\n#define AT_INSTANTIATE_SORT_PAIRS(key_t, value_size) template void radix_sort_pairs_impl( const key_t* keys_in, key_t* keys_out, const OpaqueType* values_in, OpaqueType* values_out, int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);\nAT_INSTANTIATE_SORT_PAIRS(int32_t, 1)\nAT_INSTANTIATE_SORT_PAIRS(int32_t, 2)\nAT_INSTANTIATE_SORT_PAIRS(int32_t, 4)\nAT_INSTANTIATE_SORT_PAIRS(int64_t, 1)\nAT_INSTANTIATE_SORT_PAIRS(int64_t, 2)\nAT_INSTANTIATE_SORT_PAIRS(int64_t, 4)\n#define AT_INSTANTIATE_SORT_PAIRS_8(scalar_t, ScalarType) AT_INSTANTIATE_SORT_PAIRS(scalar_t, 8)\nAT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTANTIATE_SORT_PAIRS_8)\n\n#if !AT_ROCM_ENABLED() || (AT_ROCM_ENABLED() && ROCM_VERSION >= 40500)\nAT_INSTANTIATE_SORT_PAIRS(c10::BFloat16, 8)\n#endif\n} \n} \n} \n} ###" }, { "cuda": "\n#ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_\n#define CAFFE2_UTILS_MATH_REDUCE_CUH_\n#include \"caffe2/utils/cub_namespace.cuh\"\n#include \n#include \"caffe2/core/common_gpu.h\"\nnamespace caffe2 {\ntemplate \nusing BlockReduce = cub::BlockReduce;\ntemplate \nusing BlockReduce2D = cub::\n BlockReduce;\n#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1( size, Func, T, grid_dim, cuda_stream, ...) do { if (size >= 128) { Func <<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { Func<<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { Func<<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { Func<<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } while (false)\n#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_2( size, Func, T1, T2, grid_dim, cuda_stream, ...) do { if (size >= 128) { Func <<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { Func <<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { Func <<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { Func <<>>(__VA_ARGS__); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } while (false)\n} \n#endif \n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#ifndef CAFFE2_UTILS_MATH_REDUCE_CUH_\n#define CAFFE2_UTILS_MATH_REDUCE_CUH_\n#include \"caffe2/utils/cub_namespace.cuh\"\n#include \n#include \"caffe2/core/hip/common_gpu.h\"\nnamespace caffe2 {\ntemplate \nusing BlockReduce = hipcub::BlockReduce;\ntemplate \nusing BlockReduce2D = hipcub::\n BlockReduce;\n#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_1( size, Func, T, grid_dim, hip_stream, ...) 
do { if (size >= 128) { hipLaunchKernelGGL(( Func) , dim3(grid_dim), dim3(dim3(1, 128)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { hipLaunchKernelGGL(( Func), dim3(grid_dim), dim3(dim3(2, 64)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { hipLaunchKernelGGL(( Func), dim3(grid_dim), dim3(dim3(4, 32)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( Func), dim3(grid_dim), dim3(dim3(8, 16)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } } while (false)\n#define DISPATCH_REDUCE_KERNEL_BY_2D_BLOCK_WITH_TYPE_2( size, Func, T1, T2, grid_dim, hip_stream, ...) do { if (size >= 128) { hipLaunchKernelGGL(( Func) , dim3(grid_dim), dim3(dim3(1, 128)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 64) { hipLaunchKernelGGL(( Func) , dim3(grid_dim), dim3(dim3(2, 64)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (size >= 32) { hipLaunchKernelGGL(( Func) , dim3(grid_dim), dim3(dim3(4, 32)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( Func) , dim3(grid_dim), dim3(dim3(8, 16)), 0, hip_stream, __VA_ARGS__); C10_HIP_KERNEL_LAUNCH_CHECK(); } } while (false)\n} \n#endif ###" }, { "cuda": "\n#include \n#include \n#include \n\nnamespace caffe2 {\n\nREGISTER_CUDA_OPERATOR(VideoInput, VideoInputOp);\n\n} // namespace caffe2\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n#include \n#include \n\nnamespace caffe2 {\n\nREGISTER_HIP_OPERATOR(VideoInput, VideoInputOp);\n\n} // namespace caffe2\n###" }, { "cuda": "\n\n\n#include \n#include \"caffe2/core/context_gpu.h\"\n#include \"modules/detectron/sample_as_op.h\"\n#include \nnamespace caffe2 {\ntemplate <>\nbool SampleAsOp::RunOnDevice() {\n auto& X = Input(0); \n auto& L = Input(1); \n CAFFE_ENFORCE(\n X.dim32(0) == L.dim32(0), \"X.dim32(0) must be equal to L.dim32(0)\", \"(\", X.dim32(0), \" vs. 
\", L.dim32(0), \")\");\n \n std::vector labels(L.dim32(0));\n context_.CopyBytes(\n L.dim32(0) * sizeof(int), L.data(), &labels[0]);\n \n context_.FinishDeviceComputation();\n int count = 0;\n for (int i = 0; i < L.dim32(0); i++) {\n if (labels[i] > 0) {\n count++;\n }\n }\n assert(count > 0);\n \n vector out_shape(X.sizes().vec());\n out_shape[0] = count;\n auto* Y = Output(0, out_shape, at::dtype()); \n const int len = X.size() / X.dim32(0);\n float* output = Y->mutable_data();\n for (int i = 0; i < L.dim32(0); i++) {\n if (labels[i] > 0) {\n context_.CopyBytes(\n len * sizeof(float), X.data() + i * len, output);\n output += len;\n } \n } \n return true;\n}\ntemplate <>\nbool SampleAsGradientOp::RunOnDevice() {\n auto& X = Input(0);\n auto& L = Input(1);\n auto& dY = Input(2);\n auto* dX = Output(0, X.sizes(), at::dtype());\n \n std::vector labels(L.dim32(0));\n context_.CopyBytes(\n L.dim32(0) * sizeof(int), L.data(), &labels[0]);\n \n context_.FinishDeviceComputation();\n \n math::Set(\n dX->size(), 0.f, dX->mutable_data(), &context_);\n const int len = X.size() / X.dim32(0);\n const float* input = dY.data();\n for (int i = 0; i < L.dim32(0); i++) {\n if (labels[i] > 0) {\n context_.CopyBytes(\n len * sizeof(float), input, dX->mutable_data() + i * len);\n input += len;\n } \n } \n return true;\n}\nREGISTER_CUDA_OPERATOR(SampleAs, SampleAsOp);\nREGISTER_CUDA_OPERATOR(\n SampleAsGradient, SampleAsGradientOp);\n} \n\n###", "hip": " \n\n\n#include \n#include \"caffe2/core/hip/context_gpu.h\"\n#include \"modules/detectron/sample_as_op.h\"\n#include \nnamespace caffe2 {\ntemplate <>\nbool SampleAsOp::RunOnDevice() {\n auto& X = Input(0); \n auto& L = Input(1); \n CAFFE_ENFORCE(\n X.dim32(0) == L.dim32(0), \"X.dim32(0) must be equal to L.dim32(0)\", \"(\", X.dim32(0), \" vs. \", L.dim32(0), \")\");\n \n std::vector labels(L.dim32(0));\n context_.CopyBytes(\n L.dim32(0) * sizeof(int), L.data(), &labels[0]);\n \n context_.FinishDeviceComputation();\n int count = 0;\n for (int i = 0; i < L.dim32(0); i++) {\n if (labels[i] > 0) {\n count++;\n }\n }\n assert(count > 0);\n \n vector out_shape(X.sizes().vec());\n out_shape[0] = count;\n auto* Y = Output(0, out_shape, at::dtype()); \n const int len = X.size() / X.dim32(0);\n float* output = Y->mutable_data();\n for (int i = 0; i < L.dim32(0); i++) {\n if (labels[i] > 0) {\n context_.CopyBytes(\n len * sizeof(float), X.data() + i * len, output);\n output += len;\n } \n } \n return true;\n}\ntemplate <>\nbool SampleAsGradientOp::RunOnDevice() {\n auto& X = Input(0);\n auto& L = Input(1);\n auto& dY = Input(2);\n auto* dX = Output(0, X.sizes(), at::dtype());\n \n std::vector labels(L.dim32(0));\n context_.CopyBytes(\n L.dim32(0) * sizeof(int), L.data(), &labels[0]);\n \n context_.FinishDeviceComputation();\n \n math::Set(\n dX->size(), 0.f, dX->mutable_data(), &context_);\n const int len = X.size() / X.dim32(0);\n const float* input = dY.data();\n for (int i = 0; i < L.dim32(0); i++) {\n if (labels[i] > 0) {\n context_.CopyBytes(\n len * sizeof(float), input, dX->mutable_data() + i * len);\n input += len;\n } \n } \n return true;\n}\nREGISTER_HIP_OPERATOR(SampleAs, SampleAsOp);\nREGISTER_HIP_OPERATOR(\n SampleAsGradient, SampleAsGradientOp);\n} ###" }, { "cuda": "\n// Copyright (c) Meta Platforms, Inc. 
and affiliates.\n//\n// This source code is licensed under the BSD-style license found in the\n// LICENSE file in the root directory of this source tree.\n\n#pragma once\n\n#include \n#include \n\nnamespace torch {\nnamespace distributed {\nnamespace c10d {\nnamespace quantization {\n\nat::Tensor _float_to_bfloat16_cuda(const at::Tensor& input);\nat::Tensor _bfloat16_to_float_cuda(const at::Tensor& input);\n\n} // namespace quantization\n} // namespace c10d\n} // namespace distributed\n} // namespace torch\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n// Copyright (c) Meta Platforms, Inc. and affiliates.\n//\n// This source code is licensed under the BSD-style license found in the\n// LICENSE file in the root directory of this source tree.\n\n#pragma once\n\n#include \n#include \n\nnamespace torch {\nnamespace distributed {\nnamespace c10d {\nnamespace quantization {\n\nat::Tensor _float_to_bfloat16_hip(const at::Tensor& input);\nat::Tensor _bfloat16_to_float_hip(const at::Tensor& input);\n\n} // namespace quantization\n} // namespace c10d\n} // namespace distributed\n} // namespace torch\n###" }, { "cuda": "\n\n#define __NVFUSER_BFLOAT_TO_US(var) *(reinterpret_cast(&(var)))\n#define __NVFUSER_BFLOAT_TO_CUS(var) \\\n *(reinterpret_cast(&(var)))\n\nstruct __bfloat;\n__device__ __bfloat __float2bfloat(const float);\n\nstruct __align__(2) __bfloat {\n __bfloat() = default;\n\n __device__ __bfloat(const float f) {\n __x = __float2bfloat(f).__x;\n }\n\n protected:\n unsigned short __x;\n};\n\n__device__ __bfloat __float2bfloat(const float f) {\n __bfloat val;\n asm(\"{ cvt.rn.bf16.f32 %0, %1;}\\n\"\n : \"=h\"(__NVFUSER_BFLOAT_TO_US(val))\n : \"f\"(f));\n return val;\n}\n\n__device__ float __bfloat2float(const __bfloat h) {\n float val;\n asm(\"{ mov.b32 %0, {0,%1};}\\n\"\n : \"=f\"(val)\n : \"h\"(__NVFUSER_BFLOAT_TO_CUS(h)));\n return val;\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n\n#define __NVFUSER_BFLOAT_TO_US(var) *(reinterpret_cast(&(var)))\n#define __NVFUSER_BFLOAT_TO_CUS(var) \\\n *(reinterpret_cast(&(var)))\n\nstruct __bfloat;\n__device__ __bfloat __float2bfloat(const float);\n\nstruct __align__(2) __bfloat {\n __bfloat() = default;\n\n __device__ __bfloat(const float f) {\n __x = __float2bfloat(f).__x;\n }\n\n protected:\n unsigned short __x;\n};\n\n__device__ __bfloat __float2bfloat(const float f) {\n __bfloat val;\n asm(\"{ cvt.rn.bf16.f32 %0, %1;}\\n\"\n : \"=h\"(__NVFUSER_BFLOAT_TO_US(val))\n : \"f\"(f));\n return val;\n}\n\n__device__ float __bfloat2float(const __bfloat h) {\n float val;\n asm(\"{ mov.b32 %0, {0,%1};}\\n\"\n : \"=f\"(val)\n : \"h\"(__NVFUSER_BFLOAT_TO_CUS(h)));\n return val;\n}\n###" }, { "cuda": "\n\nstruct __align__(2) __bfloat {\n __bfloat() = default;\n\n inline __device__ __bfloat(const float f) {\n if (f != f) {\n __x = uint16_t(0x7FC0);\n } else {\n union {\n uint32_t U32;\n float F32;\n };\n\n F32 = f;\n uint32_t rounding_bias = ((U32 >> 16) & 1) + uint32_t(0x7FFF);\n __x = static_cast((U32 + rounding_bias) >> 16);\n }\n }\n\n inline __device__ operator float() const {\n float res = 0;\n uint32_t tmp = __x;\n tmp <<= 16;\n float* tempRes = reinterpret_cast(&tmp);\n res = *tempRes;\n return res;\n }\n\n protected:\n unsigned short __x;\n};\n\n__device__ __bfloat __float2bfloat(const float f) {\n return __bfloat(f);\n}\n\n__device__ float __bfloat2float(const __bfloat h) {\n return float(h);\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n\nstruct __align__(2) __bfloat {\n __bfloat() = default;\n\n inline __device__ __bfloat(const float f) {\n if (f != f) {\n __x = uint16_t(0x7FC0);\n } else {\n union {\n uint32_t U32;\n float F32;\n };\n\n F32 = f;\n uint32_t rounding_bias = ((U32 >> 16) & 1) + uint32_t(0x7FFF);\n __x = static_cast((U32 + rounding_bias) >> 16);\n }\n }\n\n inline __device__ operator float() const {\n float res = 0;\n uint32_t tmp = __x;\n tmp <<= 16;\n float* tempRes = reinterpret_cast(&tmp);\n res = *tempRes;\n return res;\n }\n\n protected:\n unsigned short __x;\n};\n\n__device__ __bfloat __float2bfloat(const float f) {\n return __bfloat(f);\n}\n\n__device__ float __bfloat2float(const __bfloat h) {\n return float(h);\n}\n###" }, { "cuda": "\n\n// Default block synchronization. Just use __barrier_sync\nnamespace block_sync {\n\n__forceinline__ __device__ void init() {}\n\n// Thread-block synchronization\n__forceinline__ __device__ void sync() {\n __barrier_sync(0);\n}\n\n} // namespace block_sync\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n\n// Default block synchronization. Just use __barrier_sync\nnamespace block_sync {\n\n__forceinline__ __device__ void init() {}\n\n// Thread-block synchronization\n__forceinline__ __device__ void sync() {\n __barrier_sync(0);\n}\n\n} // namespace block_sync\n###" }, { "cuda": "\n\n#define __NVFUSER_HALF_TO_US(var) *(reinterpret_cast(&(var)))\n#define __NVFUSER_HALF_TO_CUS(var) \\\n *(reinterpret_cast(&(var)))\n\nstruct __half;\n__device__ __half __float2half(const float);\n\nstruct __align__(2) __half {\n __half() = default;\n\n __device__ __half(const float f) {\n __x = __float2half(f).__x;\n }\n\n protected:\n unsigned short __x;\n};\n\n__device__ __half __float2half(const float f) {\n __half val;\n asm(\"{ cvt.rn.f16.f32 %0, %1;}\\n\"\n : \"=h\"(__NVFUSER_HALF_TO_US(val))\n : \"f\"(f));\n return val;\n}\n\n__device__ float __half2float(const __half h) {\n float val;\n asm(\"{ cvt.f32.f16 %0, %1;}\\n\" : \"=f\"(val) : \"h\"(__NVFUSER_HALF_TO_CUS(h)));\n return val;\n}\n\n__device__ __half __double2half(const double d) {\n#if __CUDA_ARCH__ >= 700\n __half val;\n asm(\"{ cvt.rn.f16.f64 %0, %1;}\\n\"\n : \"=h\"(__NVFUSER_HALF_TO_US(val))\n : \"d\"(d));\n return val;\n#else\n return __float2half(static_cast(d));\n#endif\n}\n\n__device__ double __half2double(const __half h) {\n#if __CUDA_ARCH__ >= 700\n double val;\n asm(\"{ cvt.f64.f16 %0, %1;}\\n\" : \"=d\"(val) : \"h\"(__NVFUSER_HALF_TO_CUS(h)));\n return val;\n#else\n return static_cast(__half2float(h));\n#endif\n}\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n\n#define __NVFUSER_HALF_TO_US(var) *(reinterpret_cast(&(var)))\n#define __NVFUSER_HALF_TO_CUS(var) \\\n *(reinterpret_cast(&(var)))\n\nstruct __half;\n__device__ __half __float2half(const float);\n\nstruct __align__(2) __half {\n __half() = default;\n\n __device__ __half(const float f) {\n __x = __float2half(f).__x;\n }\n\n protected:\n unsigned short __x;\n};\n\n__device__ __half __float2half(const float f) {\n __half val;\n asm(\"{ cvt.rn.f16.f32 %0, %1;}\\n\"\n : \"=h\"(__NVFUSER_HALF_TO_US(val))\n : \"f\"(f));\n return val;\n}\n\n__device__ float __half2float(const __half h) {\n float val;\n asm(\"{ cvt.f32.f16 %0, %1;}\\n\" : \"=f\"(val) : \"h\"(__NVFUSER_HALF_TO_CUS(h)));\n return val;\n}\n\n__device__ __half __double2half(const double d) {\n#if __HIP_ARCH__ >= 700\n __half val;\n asm(\"{ cvt.rn.f16.f64 %0, %1;}\\n\"\n : \"=h\"(__NVFUSER_HALF_TO_US(val))\n : \"d\"(d));\n return val;\n#else\n return __float2half(static_cast(d));\n#endif\n}\n\n__device__ double __half2double(const __half h) {\n#if __HIP_ARCH__ >= 700\n double val;\n asm(\"{ cvt.f64.f16 %0, %1;}\\n\" : \"=d\"(val) : \"h\"(__NVFUSER_HALF_TO_CUS(h)));\n return val;\n#else\n return static_cast(__half2float(h));\n#endif\n}\n###" }, { "cuda": "\nnamespace fused_reduction {\n\n\n\n\n\n\n\ntemplate <\n int NumVals, typename DataTypeT, typename IndexTypeT, template \n typename MakeTuple>\nstruct WelfordTripletTuple {\n static constexpr int num_vals = NumVals;\n using DataType = DataTypeT;\n using IndexType = IndexTypeT;\n using DataTuple = typename MakeTuple::type;\n using IndexTuple = typename MakeTuple::type;\n DataTuple avg;\n DataTuple var;\n IndexTuple N;\n WelfordTripletTuple(\n const DataTuple& avg, const DataTuple& var, const IndexTuple& N)\n : avg(avg), var(var), N(N) {}\n};\ntemplate \nusing LocalWelfordTripletTuple =\n WelfordTripletTuple;\ntemplate \nusing RefWelfordTripletTuple =\n WelfordTripletTuple;\ntemplate \nusing ConstRefWelfordTripletTuple =\n WelfordTripletTuple;\ntemplate \nusing VolatilePtrWelfordTripletTuple =\n WelfordTripletTuple;\n\n\ntemplate \n__inline__ __device__ static void operator+=(\n WelfordTripletTupleType& triplet, nvfuser_index_t offset) {\n triplet.avg += offset;\n triplet.var += offset;\n triplet.N += offset;\n}\n\ntemplate \n__inline__ __device__ static void copyWelfordTripletTuple(\n DstType& dst, nvfuser_index_t dst_offset, const SrcType& src, nvfuser_index_t src_offset = 0) {\n copyTuple(dst.avg, dst_offset, src.avg, src_offset);\n copyTuple(dst.var, dst_offset, src.var, src_offset);\n copyTuple(dst.N, dst_offset, src.N, src_offset);\n}\n\ntemplate \n__inline__ __device__ static void copyWelfordTripletTuple(\n DstType& dst, const SrcType& src, nvfuser_index_t src_offset = 0) {\n copyWelfordTripletTuple(dst, 0, src, src_offset);\n}\n\ntemplate \n__inline__ __device__ static void copyWelfordTripletTupleIf(\n DstType& dst, const SrcType& src, const PredType& pred) {\n copyTupleIf(dst.avg, src.avg, pred);\n copyTupleIf(dst.var, src.var, pred);\n copyTupleIf(dst.N, src.N, pred);\n}\n} \n\n###", "hip": " \nnamespace fused_reduction {\n\n\n\n\n\n\n\ntemplate <\n int NumVals, typename DataTypeT, typename IndexTypeT, template \n typename MakeTuple>\nstruct WelfordTripletTuple {\n static constexpr int num_vals = NumVals;\n using DataType = DataTypeT;\n using IndexType = IndexTypeT;\n using DataTuple = typename MakeTuple::type;\n using IndexTuple = typename MakeTuple::type;\n DataTuple avg;\n DataTuple var;\n IndexTuple N;\n 
WelfordTripletTuple(\n const DataTuple& avg, const DataTuple& var, const IndexTuple& N)\n : avg(avg), var(var), N(N) {}\n};\ntemplate \nusing LocalWelfordTripletTuple =\n WelfordTripletTuple;\ntemplate \nusing RefWelfordTripletTuple =\n WelfordTripletTuple;\ntemplate \nusing ConstRefWelfordTripletTuple =\n WelfordTripletTuple;\ntemplate \nusing VolatilePtrWelfordTripletTuple =\n WelfordTripletTuple;\n\n\ntemplate \n__inline__ __device__ static void operator+=(\n WelfordTripletTupleType& triplet, nvfuser_index_t offset) {\n triplet.avg += offset;\n triplet.var += offset;\n triplet.N += offset;\n}\n\ntemplate \n__inline__ __device__ static void copyWelfordTripletTuple(\n DstType& dst, nvfuser_index_t dst_offset, const SrcType& src, nvfuser_index_t src_offset = 0) {\n copyTuple(dst.avg, dst_offset, src.avg, src_offset);\n copyTuple(dst.var, dst_offset, src.var, src_offset);\n copyTuple(dst.N, dst_offset, src.N, src_offset);\n}\n\ntemplate \n__inline__ __device__ static void copyWelfordTripletTuple(\n DstType& dst, const SrcType& src, nvfuser_index_t src_offset = 0) {\n copyWelfordTripletTuple(dst, 0, src, src_offset);\n}\n\ntemplate \n__inline__ __device__ static void copyWelfordTripletTupleIf(\n DstType& dst, const SrcType& src, const PredType& pred) {\n copyTupleIf(dst.avg, src.avg, pred);\n copyTupleIf(dst.var, src.var, pred);\n copyTupleIf(dst.N, src.N, pred);\n}\n} ###" }, { "cuda": "\nnamespace grid_broadcast {\n\n// Broadcasts per-thread values across threads and blocks.\n//\n// Function parameters:\n// - out: Per-thread output location\n// - inp_val: Per-thread input value\n// - work_buf: Temporary buffer for communication across threads/blocks\n// - sync_flags: A vector of integers for synchronizations\n//\n// Template parameters:\n// - X/Y/Z_BLOCK: When true, broadcasts across thread blocks along the X/Y/Z\n// dimensions\n// - X/Y/Z_THREAD: When true, broadcasts across threads along the X/Y/Z\n// dimensions\ntemplate <\n bool X_BLOCK,\n bool Y_BLOCK,\n bool Z_BLOCK,\n bool X_THREAD,\n bool Y_THREAD,\n bool Z_THREAD,\n typename T>\n__device__ void broadcast(\n T& out,\n const T& inp_val,\n volatile T* work_buf,\n Tensor sync_flags,\n bool read_write_pred) {\n // Number of values broadcasted in the grid dimensions\n const auto grid_seg_size =\n index_utils::maskedSize(gridDim);\n\n // Index of the broadcast we're performing out of the grid_seg_size\n const auto grid_seg_idx =\n index_utils::maskedOffset(\n blockIdx, gridDim);\n\n // Number of threads not participating in a broadcast dimension, this is the\n // number of thread entries to expect in the work buffer, therefore a striding\n const auto block_stride =\n index_utils::maskedSize(blockDim);\n\n // Which broadcast in the block this is to line up the entry with the work\n // buffer\n const auto thread_offset =\n index_utils::maskedOffset(\n threadIdx, blockDim);\n\n const bool has_valid_data = (!X_BLOCK || blockIdx.x == gridDim.x - 1) &&\n (!Y_BLOCK || blockIdx.y == gridDim.y - 1) &&\n (!Z_BLOCK || blockIdx.z == gridDim.z - 1) &&\n (!X_THREAD || threadIdx.x == 0) && (!Y_THREAD || threadIdx.y == 0) &&\n (!Z_THREAD || threadIdx.z == 0);\n\n if (has_valid_data && read_write_pred) {\n work_buf[grid_seg_idx * block_stride + thread_offset] = inp_val;\n __threadfence();\n }\n\n grid_sync::sync(\n sync_flags[grid_seg_idx], grid_seg_size);\n\n if (read_write_pred) {\n out = work_buf[grid_seg_idx * block_stride + thread_offset];\n }\n\n // Make sure everyone has read from the buffer before continuing the kernel\n // and potentially 
overwriting\n grid_sync::sync(\n sync_flags[grid_seg_idx], grid_seg_size);\n}\n} // namespace grid_broadcast\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\nnamespace grid_broadcast {\n\n// Broadcasts per-thread values across threads and blocks.\n//\n// Function parameters:\n// - out: Per-thread output location\n// - inp_val: Per-thread input value\n// - work_buf: Temporary buffer for communication across threads/blocks\n// - sync_flags: A vector of integers for synchronizations\n//\n// Template parameters:\n// - X/Y/Z_BLOCK: When true, broadcasts across thread blocks along the X/Y/Z\n// dimensions\n// - X/Y/Z_THREAD: When true, broadcasts across threads along the X/Y/Z\n// dimensions\ntemplate <\n bool X_BLOCK,\n bool Y_BLOCK,\n bool Z_BLOCK,\n bool X_THREAD,\n bool Y_THREAD,\n bool Z_THREAD,\n typename T>\n__device__ void broadcast(\n T& out,\n const T& inp_val,\n volatile T* work_buf,\n Tensor sync_flags,\n bool read_write_pred) {\n // Number of values broadcasted in the grid dimensions\n const auto grid_seg_size =\n index_utils::maskedSize(gridDim);\n\n // Index of the broadcast we're performing out of the grid_seg_size\n const auto grid_seg_idx =\n index_utils::maskedOffset(\n blockIdx, gridDim);\n\n // Number of threads not participating in a broadcast dimension, this is the\n // number of thread entries to expect in the work buffer, therefore a striding\n const auto block_stride =\n index_utils::maskedSize(blockDim);\n\n // Which broadcast in the block this is to line up the entry with the work\n // buffer\n const auto thread_offset =\n index_utils::maskedOffset(\n threadIdx, blockDim);\n\n const bool has_valid_data = (!X_BLOCK || blockIdx.x == gridDim.x - 1) &&\n (!Y_BLOCK || blockIdx.y == gridDim.y - 1) &&\n (!Z_BLOCK || blockIdx.z == gridDim.z - 1) &&\n (!X_THREAD || threadIdx.x == 0) && (!Y_THREAD || threadIdx.y == 0) &&\n (!Z_THREAD || threadIdx.z == 0);\n\n if (has_valid_data && read_write_pred) {\n work_buf[grid_seg_idx * block_stride + thread_offset] = inp_val;\n __threadfence();\n }\n\n grid_sync::sync(\n sync_flags[grid_seg_idx], grid_seg_size);\n\n if (read_write_pred) {\n out = work_buf[grid_seg_idx * block_stride + thread_offset];\n }\n\n // Make sure everyone has read from the buffer before continuing the kernel\n // and potentially overwriting\n grid_sync::sync(\n sync_flags[grid_seg_idx], grid_seg_size);\n}\n} // namespace grid_broadcast\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_CUDA char bessel_j0_name[] = \"bessel_j0_forward\";\n\nvoid bessel_j0_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j0_cuda\", [&]() {\n jitted_gpu_kernel(iterator, bessel_j0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j0_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_j0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(special_bessel_j0_stub, &bessel_j0_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_HIP char bessel_j0_name[] = \"bessel_j0_forward\";\n\nvoid bessel_j0_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j0_hip\", [&]() {\n jitted_gpu_kernel(iterator, bessel_j0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j0_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_j0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(special_bessel_j0_stub, &bessel_j0_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\nnamespace grid_sync {\n\n#define FIRST_UINT64_BIT ((uint64_t)1 << (sizeof(uint64_t) * 8 - 1))\ntemplate \n__device__ T globalAsVolatile(volatile T& global_val) {\n return global_val;\n}\n\n\n\n\n\n\n\n\n\ntemplate \n__device__ void sync(\n int64_t& semaphore, const uint64_t& segment_size, const bool last_block) {\n \n __threadfence();\n \n block_sync::sync();\n \n if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {\n \n \n \n \n uint64_t semaphore_increment = 1;\n \n \n \n if (last_block) {\n semaphore_increment = FIRST_UINT64_BIT - (segment_size - 1);\n }\n uint64_t oldArrive =\n atomicAdd(reinterpret_cast(&semaphore), semaphore_increment);\n \n \n \n unsigned int ns = 8;\n while ((PERSISTENT || last_block) &&\n ((oldArrive ^ globalAsVolatile(semaphore)) & FIRST_UINT64_BIT) ==\n 0) {\n \n \n#if __CUDA_ARCH__ >= 700\n \n __nanosleep(ns); \n if (ns < 256) {\n ns *= 2;\n }\n#endif\n }\n }\n \n block_sync::sync();\n}\ntemplate \n__device__ void sync(int64_t& semaphore, const uint64_t& segment_size) {\n sync(\n semaphore, segment_size, index_utils::maskedIsLast(blockIdx, gridDim));\n}\n\n\n\n\n\n\n\n\n\n\n\ntemplate \n__device__ void sync(\n int64_t& semaphore, const uint64_t& segment_size, const nvfuser_index_t n_entrances) {\n \n __threadfence();\n \n block_sync::sync();\n \n if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {\n \n \n \n bool last_block =\n index_utils::maskedIsLast(blockIdx, gridDim);\n if (last_block) {\n int64_t finished_val =\n ((int64_t)(\n index_utils::maskedSize(gridDim) -\n 1)) *\n ((int64_t)n_entrances);\n unsigned int ns = 8;\n \n while (globalAsVolatile(semaphore) < finished_val) {\n#if __CUDA_ARCH__ >= 700\n \n __nanosleep(ns); \n if (ns < 256) {\n ns *= 2;\n }\n#endif\n }\n } else {\n auto old = atomicAdd(reinterpret_cast(&semaphore), 1);\n }\n }\n \n block_sync::sync();\n}\n} \n\n###", "hip": " \nnamespace grid_sync {\n\n#define FIRST_UINT64_BIT ((uint64_t)1 << (sizeof(uint64_t) * 8 - 1))\ntemplate \n__device__ T globalAsVolatile(volatile T& global_val) {\n return global_val;\n}\n\n\n\n\n\n\n\n\n\ntemplate \n__device__ void sync(\n int64_t& semaphore, const uint64_t& segment_size, const bool last_block) {\n \n __threadfence();\n \n block_sync::sync();\n \n if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {\n \n \n \n \n uint64_t semaphore_increment = 1;\n \n \n \n if (last_block) {\n semaphore_increment = FIRST_UINT64_BIT - (segment_size - 1);\n }\n uint64_t oldArrive =\n atomicAdd(reinterpret_cast(&semaphore), semaphore_increment);\n \n \n \n unsigned int ns = 8;\n while 
((PERSISTENT || last_block) &&\n ((oldArrive ^ globalAsVolatile(semaphore)) & FIRST_UINT64_BIT) ==\n 0) {\n \n \n#if __HIP_ARCH__ >= 700\n \n __nanosleep(ns); \n if (ns < 256) {\n ns *= 2;\n }\n#endif\n }\n }\n \n block_sync::sync();\n}\ntemplate \n__device__ void sync(int64_t& semaphore, const uint64_t& segment_size) {\n sync(\n semaphore, segment_size, index_utils::maskedIsLast(blockIdx, gridDim));\n}\n\n\n\n\n\n\n\n\n\n\n\ntemplate \n__device__ void sync(\n int64_t& semaphore, const uint64_t& segment_size, const nvfuser_index_t n_entrances) {\n \n __threadfence();\n \n block_sync::sync();\n \n if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {\n \n \n \n bool last_block =\n index_utils::maskedIsLast(blockIdx, gridDim);\n if (last_block) {\n int64_t finished_val =\n ((int64_t)(\n index_utils::maskedSize(gridDim) -\n 1)) *\n ((int64_t)n_entrances);\n unsigned int ns = 8;\n \n while (globalAsVolatile(semaphore) < finished_val) {\n#if __HIP_ARCH__ >= 700\n \n __nanosleep(ns); \n if (ns < 256) {\n ns *= 2;\n }\n#endif\n }\n } else {\n auto old = atomicAdd(reinterpret_cast(&semaphore), 1);\n }\n }\n \n block_sync::sync();\n}\n} ###" }, { "cuda": "\nnamespace index_utils {\n\n// Utility functions\n\n// Total size of provided dimension\ntemplate \n__device__ __forceinline__ nvfuser_index_t size(const _dim3& d) {\n return (nvfuser_index_t)d.x * (nvfuser_index_t)d.y * (nvfuser_index_t)d.z;\n}\n\n// Linearized indexing of idx based on dim, if bool==false that dimension does\n// not participate\ntemplate \n__device__ nvfuser_index_t maskedOffset(const _dim3& idx, const _dim3_2& dim) {\n nvfuser_index_t offset = 0;\n if (Z)\n offset += idx.z;\n if (Y)\n offset = offset * dim.y + idx.y;\n if (X)\n offset = offset * dim.x + idx.x;\n return offset;\n}\n\n// Linearized indexing of idx based on dim. All dimensions participate.\ntemplate \n__device__ nvfuser_index_t offset(const _dim3& idx, const _dim3_2& dim) {\n nvfuser_index_t offset = idx.z;\n offset = offset * dim.y + idx.y;\n offset = offset * dim.x + idx.x;\n return offset;\n}\n\n// Masks the provided dim3, those == false get truncated to 1\ntemplate \n__device__ dim3 maskedDims(const _dim3& dim) {\n return dim3{\n X ? (unsigned)dim.x : 1U,\n Y ? (unsigned)dim.y : 1U,\n Z ? (unsigned)dim.z : 1U};\n}\n\n// Provides total size of dim with masking, those dims == false do not\n// participate in the size calculation\ntemplate \n__device__ nvfuser_index_t maskedSize(const _dim3& dim) {\n return size(maskedDims(dim));\n}\n\n// Checks if provided idx is zero on those dims == true\ntemplate \n__device__ bool maskedIsZero(const _dim3& idx) {\n bool isZero = true;\n if (X)\n isZero = isZero && idx.x == 0;\n if (Y)\n isZero = isZero && idx.y == 0;\n if (Z)\n isZero = isZero && idx.z == 0;\n return isZero;\n}\n\n// Checks if provided idx is zero on those dims == true\ntemplate \n__device__ bool maskedIsLast(const _dim3& idx, const _dim3_2& dim) {\n bool isZero = true;\n if (X)\n isZero = isZero && idx.x == dim.x - 1;\n if (Y)\n isZero = isZero && idx.y == dim.y - 1;\n if (Z)\n isZero = isZero && idx.z == dim.z - 1;\n return isZero;\n}\n\n} // namespace index_utils\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\nnamespace index_utils {\n\n// Utility functions\n\n// Total size of provided dimension\ntemplate \n__device__ __forceinline__ nvfuser_index_t size(const _dim3& d) {\n return (nvfuser_index_t)d.x * (nvfuser_index_t)d.y * (nvfuser_index_t)d.z;\n}\n\n// Linearized indexing of idx based on dim, if bool==false that dimension does\n// not participate\ntemplate \n__device__ nvfuser_index_t maskedOffset(const _dim3& idx, const _dim3_2& dim) {\n nvfuser_index_t offset = 0;\n if (Z)\n offset += idx.z;\n if (Y)\n offset = offset * dim.y + idx.y;\n if (X)\n offset = offset * dim.x + idx.x;\n return offset;\n}\n\n// Linearized indexing of idx based on dim. All dimensions participate.\ntemplate \n__device__ nvfuser_index_t offset(const _dim3& idx, const _dim3_2& dim) {\n nvfuser_index_t offset = idx.z;\n offset = offset * dim.y + idx.y;\n offset = offset * dim.x + idx.x;\n return offset;\n}\n\n// Masks the provided dim3, those == false get truncated to 1\ntemplate \n__device__ dim3 maskedDims(const _dim3& dim) {\n return dim3{\n X ? (unsigned)dim.x : 1U,\n Y ? (unsigned)dim.y : 1U,\n Z ? (unsigned)dim.z : 1U};\n}\n\n// Provides total size of dim with masking, those dims == false do not\n// participate in the size calculation\ntemplate \n__device__ nvfuser_index_t maskedSize(const _dim3& dim) {\n return size(maskedDims(dim));\n}\n\n// Checks if provided idx is zero on those dims == true\ntemplate \n__device__ bool maskedIsZero(const _dim3& idx) {\n bool isZero = true;\n if (X)\n isZero = isZero && idx.x == 0;\n if (Y)\n isZero = isZero && idx.y == 0;\n if (Z)\n isZero = isZero && idx.z == 0;\n return isZero;\n}\n\n// Checks if provided idx is zero on those dims == true\ntemplate \n__device__ bool maskedIsLast(const _dim3& idx, const _dim3_2& dim) {\n bool isZero = true;\n if (X)\n isZero = isZero && idx.x == dim.x - 1;\n if (Y)\n isZero = isZero && idx.y == dim.y - 1;\n if (Z)\n isZero = isZero && idx.z == dim.z - 1;\n return isZero;\n}\n\n} // namespace index_utils\n###" }, { "cuda": "\n__device__ unsigned int mulhilo32(\n unsigned int a,\n unsigned int b,\n unsigned int* result_high) {\n *result_high = __umulhi(a, b);\n return a * b;\n}\n\n__device__ uint4 single_round(uint4 ctr, uint2 key) {\n constexpr unsigned long kPhiloxSA = 0xD2511F53;\n constexpr unsigned long kPhiloxSB = 0xCD9E8D57;\n unsigned int hi0;\n unsigned int hi1;\n unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);\n unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);\n uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};\n return ret;\n}\n\n__device__ uint4 philox(\n unsigned long long seed,\n unsigned long long subsequence,\n unsigned long long offset) {\n constexpr unsigned long kPhilox10A = 0x9E3779B9;\n constexpr unsigned long kPhilox10B = 0xBB67AE85;\n uint2 key = {};\n key.x = (unsigned int)seed;\n key.y = (unsigned int)(seed >> 32);\n uint4 counter = make_uint4(0, 0, 0, 0);\n counter.x = (unsigned int)(offset);\n counter.y = (unsigned int)(offset >> 32);\n counter.z = (unsigned int)(subsequence);\n counter.w = (unsigned int)(subsequence >> 32);\n\n uint4 output = {};\n uint2 key_ = key;\n uint4 counter_ = counter;\n for (int i = 0; i < 9; i++) {\n counter_ = single_round(counter_, key_);\n key_.x += (kPhilox10A);\n key_.y += (kPhilox10B);\n }\n output = single_round(counter_, key_);\n return output;\n}\n\n__device__ float uniformf(unsigned int x) {\n constexpr float kRanInvM32 = 2.3283064e-10f; // Inverse of 2^32.\n float result = x * kRanInvM32;\n return 
result == 1 ? 0.0f : result;\n}\n\n__device__ double uniform(unsigned int x, unsigned int y) {\n constexpr double kRan2Pow53Inv = 1.1102230246251565e-16;\n const unsigned long long z =\n (unsigned long long)x ^ ((unsigned long long)y << (53 - 32));\n double result = z * kRan2Pow53Inv + (kRan2Pow53Inv / 2.0);\n return result == 1 ? 0.0 : result;\n}\n\n__device__ double rng_uniform(const uint4& rng_result, int rng_component) {\n return uniform(\n (&rng_result.x)[rng_component * 2],\n (&rng_result.x)[rng_component * 2 + 1]);\n}\n\n__device__ float rng_uniformf(const uint4& rng_result, int rng_component) {\n return uniformf((&rng_result.x)[rng_component]);\n}\n\n__device__ double rng_uniform_range(\n const uint4& rng_result,\n int rng_component,\n double from,\n double to) {\n auto range = to - from;\n auto uniform01 = rng_uniform(rng_result, rng_component);\n return from + range * uniform01;\n}\n\n__device__ float rng_uniform_rangef(\n const uint4& rng_result,\n int rng_component,\n float from,\n float to) {\n auto range = to - from;\n auto uniform01 = rng_uniformf(rng_result, rng_component);\n return from + range * uniform01;\n}\n\n\n###", "hip": " \n__device__ unsigned int mulhilo32(\n unsigned int a, unsigned int b, unsigned int* result_high) {\n *result_high = __umulhi(a, b);\n return a * b;\n}\n__device__ uint4 single_round(uint4 ctr, uint2 key) {\n constexpr unsigned long kPhiloxSA = 0xD2511F53;\n constexpr unsigned long kPhiloxSB = 0xCD9E8D57;\n unsigned int hi0;\n unsigned int hi1;\n unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);\n unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);\n uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};\n return ret;\n}\n__device__ uint4 philox(\n unsigned long long seed, unsigned long long subsequence, unsigned long long offset) {\n constexpr unsigned long kPhilox10A = 0x9E3779B9;\n constexpr unsigned long kPhilox10B = 0xBB67AE85;\n uint2 key = {};\n key.x = (unsigned int)seed;\n key.y = (unsigned int)(seed >> 32);\n uint4 counter = make_uint4(0, 0, 0, 0);\n counter.x = (unsigned int)(offset);\n counter.y = (unsigned int)(offset >> 32);\n counter.z = (unsigned int)(subsequence);\n counter.w = (unsigned int)(subsequence >> 32);\n uint4 output = {};\n uint2 key_ = key;\n uint4 counter_ = counter;\n for (int i = 0; i < 9; i++) {\n counter_ = single_round(counter_, key_);\n key_.x += (kPhilox10A);\n key_.y += (kPhilox10B);\n }\n output = single_round(counter_, key_);\n return output;\n}\n__device__ float uniformf(unsigned int x) {\n constexpr float kRanInvM32 = 2.3283064e-10f; \n float result = x * kRanInvM32;\n return result == 1 ? 0.0f : result;\n}\n__device__ double uniform(unsigned int x, unsigned int y) {\n constexpr double kRan2Pow53Inv = 1.1102230246251565e-16;\n const unsigned long long z =\n (unsigned long long)x ^ ((unsigned long long)y << (53 - 32));\n double result = z * kRan2Pow53Inv + (kRan2Pow53Inv / 2.0);\n return result == 1 ? 
0.0 : result;\n}\n__device__ double rng_uniform(const uint4& rng_result, int rng_component) {\n return uniform(\n (&rng_result.x)[rng_component * 2], (&rng_result.x)[rng_component * 2 + 1]);\n}\n__device__ float rng_uniformf(const uint4& rng_result, int rng_component) {\n return uniformf((&rng_result.x)[rng_component]);\n}\n__device__ double rng_uniform_range(\n const uint4& rng_result, int rng_component, double from, double to) {\n auto range = to - from;\n auto uniform01 = rng_uniform(rng_result, rng_component);\n return from + range * uniform01;\n}\n__device__ float rng_uniform_rangef(\n const uint4& rng_result, int rng_component, float from, float to) {\n auto range = to - from;\n auto uniform01 = rng_uniformf(rng_result, rng_component);\n return from + range * uniform01;\n}###" }, { "cuda": "\n// Utility macro for this file\n#define DEVICE_INLINE __device__ inline\n\n// Utility class for 2D swizzle:\ntemplate \nstruct IndexGeneric {\n const index_t x = 0, y = 0;\n DEVICE_INLINE IndexGeneric(index_t x_, index_t y_) : x(x_), y(y_) {}\n};\n\n// Default type for integration\nusing Index2D = IndexGeneric;\n\n// Small type for unit computation\nusing Index2DInt = IndexGeneric;\n\n// ------------------------------------------------------------\n// Swizzle Definitions\n// for each swizzle name:\n// un(Swizzle Name) e.g. unZShape is the inverse of ZShape,\n// (unswizzle is needed for inlining and is currently not actively used.)\n// ------------------------------------------------------------\n\n// Unit Z swizzle:\n// Alternate directions of Y dimension:\n// 1 2 3 1 2 3\n// 4 5 6 => 6 5 4\n// 7 8 9 7 8 9\nDEVICE_INLINE Index2D ZShape(Index2D in, Index2D unit_dim) {\n return Index2D(in.x, in.x % 2 == 0 ? in.y : (unit_dim.y - in.y - 1));\n}\n\n// ZShape is inverse of itself\nDEVICE_INLINE Index2D unZShape(Index2D in, Index2D unit_dim) {\n return ZShape(in, unit_dim);\n}\n\n// Block cyclic Xor swizzle: (bank conflict removal)\n// Apply cyclic Xor within blocks:\n// Example: cyclic Xor\n// 1 2 3 4 1 2 3 4\n// 5 6 7 8 6 5 8 7\n// 9 10 11 12 => 11 12 9 10\n// 13 14 15 16 16 15 14 13\n// Note:\nDEVICE_INLINE Index2D Xor(Index2D in, Index2DInt unit_dim) {\n // Need to validate in swizzle configuration:\n // unit_dim.x == unit_dim.y\n return Index2D(in.x, (in.y ^ in.x));\n}\n\n// Inverse of Xor is itself\nDEVICE_INLINE Index2D unXor(Index2D in, Index2DInt unit_dim) {\n return Xor(in, unit_dim);\n}\n\n// Scatter swizzle:\n// Corresponds to the data layout out of ldmatrix intrinsic.\n// supported dimensions are : 8x4, 16x4, 32x4\ntemplate \nDEVICE_INLINE Index2D Scatter(Index2D in) {\n static_assert(row_size == 8 || row_size == 16 || row_size == 32);\n return Index2D((in.y * row_size + in.x) / 4, in.x % 4);\n}\n\ntemplate \nDEVICE_INLINE Index2D unScatter(Index2D in) {\n static_assert(row_size == 8 || row_size == 16 || row_size == 32);\n return Index2D(in.y + (in.x % (row_size / 4)) * 4, in.x / (row_size / 4));\n}\n\n#undef DEVICE_INLINE\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n// Utility macro for this file\n#define DEVICE_INLINE __device__ inline\n\n// Utility class for 2D swizzle:\ntemplate \nstruct IndexGeneric {\n const index_t x = 0, y = 0;\n DEVICE_INLINE IndexGeneric(index_t x_, index_t y_) : x(x_), y(y_) {}\n};\n\n// Default type for integration\nusing Index2D = IndexGeneric;\n\n// Small type for unit computation\nusing Index2DInt = IndexGeneric;\n\n// ------------------------------------------------------------\n// Swizzle Definitions\n// for each swizzle name:\n// un(Swizzle Name) e.g. unZShape is the inverse of ZShape,\n// (unswizzle is needed for inlining and is currently not actively used.)\n// ------------------------------------------------------------\n\n// Unit Z swizzle:\n// Alternate directions of Y dimension:\n// 1 2 3 1 2 3\n// 4 5 6 => 6 5 4\n// 7 8 9 7 8 9\nDEVICE_INLINE Index2D ZShape(Index2D in, Index2D unit_dim) {\n return Index2D(in.x, in.x % 2 == 0 ? in.y : (unit_dim.y - in.y - 1));\n}\n\n// ZShape is inverse of itself\nDEVICE_INLINE Index2D unZShape(Index2D in, Index2D unit_dim) {\n return ZShape(in, unit_dim);\n}\n\n// Block cyclic Xor swizzle: (bank conflict removal)\n// Apply cyclic Xor within blocks:\n// Example: cyclic Xor\n// 1 2 3 4 1 2 3 4\n// 5 6 7 8 6 5 8 7\n// 9 10 11 12 => 11 12 9 10\n// 13 14 15 16 16 15 14 13\n// Note:\nDEVICE_INLINE Index2D Xor(Index2D in, Index2DInt unit_dim) {\n // Need to validate in swizzle configuration:\n // unit_dim.x == unit_dim.y\n return Index2D(in.x, (in.y ^ in.x));\n}\n\n// Inverse of Xor is itself\nDEVICE_INLINE Index2D unXor(Index2D in, Index2DInt unit_dim) {\n return Xor(in, unit_dim);\n}\n\n// Scatter swizzle:\n// Corresponds to the data layout out of ldmatrix intrinsic.\n// supported dimensions are : 8x4, 16x4, 32x4\ntemplate \nDEVICE_INLINE Index2D Scatter(Index2D in) {\n static_assert(row_size == 8 || row_size == 16 || row_size == 32);\n return Index2D((in.y * row_size + in.x) / 4, in.x % 4);\n}\n\ntemplate \nDEVICE_INLINE Index2D unScatter(Index2D in) {\n static_assert(row_size == 8 || row_size == 16 || row_size == 32);\n return Index2D(in.y + (in.x % (row_size / 4)) * 4, in.x / (row_size / 4));\n}\n\n#undef DEVICE_INLINE\n###" }, { "cuda": "\ntemplate \nstruct Tensor {\n __device__ T& operator[](nvfuser_index_t ind) {\n return data[ind];\n };\n\n T* data;\n nvfuser_index_t size[N];\n nvfuser_index_t stride[N];\n};\n\n// Specialization for 0-dim case as it does not need size and stride arrays.\n// They will be an error as well since zero-length arrays are not allowed.\ntemplate \nstruct Tensor {\n __device__ T& operator[](nvfuser_index_t) {\n return *data;\n };\n\n T* data;\n};\n\n// Specialization for 0-dim case that's easy to pass in a CPU based tensor.\ntemplate \nstruct CpuScalarTensor {\n __device__ T& operator[](int) {\n return data;\n };\n\n T data;\n};\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\ntemplate \nstruct Tensor {\n __device__ T& operator[](nvfuser_index_t ind) {\n return data[ind];\n };\n\n T* data;\n nvfuser_index_t size[N];\n nvfuser_index_t stride[N];\n};\n\n// Specialization for 0-dim case as it does not need size and stride arrays.\n// They will be an error as well since zero-length arrays are not allowed.\ntemplate \nstruct Tensor {\n __device__ T& operator[](nvfuser_index_t) {\n return *data;\n };\n\n T* data;\n};\n\n// Specialization for 0-dim case that's easy to pass in a CPU based tensor.\ntemplate \nstruct CpuScalarTensor {\n __device__ T& operator[](int) {\n return data;\n };\n\n T data;\n};\n###" }, { "cuda": "\n// Type trait utils\ntemplate \nstruct MaybeVolatile;\n\ntemplate \nstruct MaybeVolatile {\n using type = volatile Type;\n};\n\ntemplate \nstruct MaybeVolatile {\n using type = Type;\n};\n\ntemplate \nstruct TypeList {};\n\ntemplate \nstruct TypeSelector {\n using type = typename TypeSelector::type;\n};\n\ntemplate \nstruct TypeSelector<0, T, Types...> {\n using type = T;\n};\n\ntemplate \nstruct IsSameType {\n static constexpr bool value = false;\n};\n\ntemplate \nstruct IsSameType {\n static constexpr bool value = true;\n};\n\ntemplate \nstruct IsPointerType {\n static constexpr bool value = false;\n};\n\ntemplate \nstruct IsPointerType {\n static constexpr bool value = true;\n};\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n// Type trait utils\ntemplate \nstruct MaybeVolatile;\n\ntemplate \nstruct MaybeVolatile {\n using type = volatile Type;\n};\n\ntemplate \nstruct MaybeVolatile {\n using type = Type;\n};\n\ntemplate \nstruct TypeList {};\n\ntemplate \nstruct TypeSelector {\n using type = typename TypeSelector::type;\n};\n\ntemplate \nstruct TypeSelector<0, T, Types...> {\n using type = T;\n};\n\ntemplate \nstruct IsSameType {\n static constexpr bool value = false;\n};\n\ntemplate \nstruct IsSameType {\n static constexpr bool value = true;\n};\n\ntemplate \nstruct IsPointerType {\n static constexpr bool value = false;\n};\n\ntemplate \nstruct IsPointerType {\n static constexpr bool value = true;\n};\n###" }, { "cuda": "\nnamespace warp {\n\ntemplate <\n bool SINGLE_WARP,\n typename T,\n typename Func,\n typename _dim3ti,\n typename _dim3bd>\n__device__ void warpReduceTIDX(\n T& out,\n const T& inp_val,\n Func reduction_op,\n const _dim3ti& thread_idx,\n const _dim3bd& block_dim,\n T* shared_mem,\n bool read_write_pred,\n T init_val) {\n constexpr int WARP_SIZE = 32;\n\n // Assume input padded to multiples of a warp\n T reduce_val = init_val;\n\n // Do warp reduction\n if (read_write_pred) {\n reduce_val = inp_val;\n }\n\n // Reduce within each warp\n for (int i = 16; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, WARP_SIZE));\n }\n\n // Reduce across warp if needed\n // Load value to shared mem\n if (!SINGLE_WARP) {\n unsigned int warp_idx = thread_idx.x / WARP_SIZE;\n unsigned int lane_idx = thread_idx.x % WARP_SIZE;\n unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;\n bool is_warp_head = lane_idx == 0;\n unsigned int reduction_size = block_dim.x;\n unsigned int num_of_warps = reduction_size / WARP_SIZE;\n unsigned int smem_offset = reduce_group_id * num_of_warps;\n\n block_sync::sync();\n\n if (is_warp_head) {\n shared_mem[smem_offset + warp_idx] = reduce_val;\n }\n\n block_sync::sync();\n\n if (warp_idx == 0) {\n // This assumes num_of_warps will be < 32, meaning < 1024 threads.\n // Should 
be true for long enough.\n assert(num_of_warps <= 32);\n\n reduce_val = lane_idx < num_of_warps ? shared_mem[smem_offset + lane_idx]\n : init_val;\n\n // Reduce within warp 0\n for (int i = 16; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, 32));\n }\n }\n\n if (is_warp_head) {\n reduction_op(out, reduce_val);\n }\n } else {\n reduction_op(out, reduce_val);\n }\n}\n\n} // namespace warp\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\nnamespace warp {\n\ntemplate <\n bool SINGLE_WARP,\n typename T,\n typename Func,\n typename _dim3ti,\n typename _dim3bd>\n__device__ void warpReduceTIDX(\n T& out,\n const T& inp_val,\n Func reduction_op,\n const _dim3ti& thread_idx,\n const _dim3bd& block_dim,\n T* shared_mem,\n bool read_write_pred,\n T init_val) {\n constexpr int WARP_SIZE = 32;\n\n // Assume input padded to multiples of a warp\n T reduce_val = init_val;\n\n // Do warp reduction\n if (read_write_pred) {\n reduce_val = inp_val;\n }\n\n // Reduce within each warp\n for (int i = 16; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, WARP_SIZE));\n }\n\n // Reduce across warp if needed\n // Load value to shared mem\n if (!SINGLE_WARP) {\n unsigned int warp_idx = thread_idx.x / WARP_SIZE;\n unsigned int lane_idx = thread_idx.x % WARP_SIZE;\n unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;\n bool is_warp_head = lane_idx == 0;\n unsigned int reduction_size = block_dim.x;\n unsigned int num_of_warps = reduction_size / WARP_SIZE;\n unsigned int smem_offset = reduce_group_id * num_of_warps;\n\n block_sync::sync();\n\n if (is_warp_head) {\n shared_mem[smem_offset + warp_idx] = reduce_val;\n }\n\n block_sync::sync();\n\n if (warp_idx == 0) {\n // This assumes num_of_warps will be < 32, meaning < 1024 threads.\n // Should be true for long enough.\n assert(num_of_warps <= 32);\n\n reduce_val = lane_idx < num_of_warps ? 
shared_mem[smem_offset + lane_idx]\n : init_val;\n\n // Reduce within warp 0\n for (int i = 16; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor_sync(0xffffffff, reduce_val, i, 32));\n }\n }\n\n if (is_warp_head) {\n reduction_op(out, reduce_val);\n }\n } else {\n reduction_op(out, reduce_val);\n }\n}\n\n} // namespace warp\n###" }, { "cuda": "\nnamespace warp {\n\ntemplate <\n bool SINGLE_WARP,\n typename T,\n typename Func,\n typename _dim3ti,\n typename _dim3bd>\n__device__ void warpReduceTIDX(\n T& out,\n const T& inp_val,\n Func reduction_op,\n const _dim3ti& thread_idx,\n const _dim3bd& block_dim,\n T* shared_mem,\n bool read_write_pred,\n T init_val) {\n constexpr int WARP_SIZE = warpSize;\n\n // Assume input padded to multiples of a warp\n T reduce_val = init_val;\n\n // Do warp reduction\n if (read_write_pred) {\n reduce_val = inp_val;\n }\n\n // Reduce within each warp\n for (int i = WARP_SIZE/2; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));\n }\n\n // Reduce across warp if needed\n // Load value to shared mem\n if (!SINGLE_WARP) {\n unsigned int warp_idx = thread_idx.x / WARP_SIZE;\n unsigned int lane_idx = thread_idx.x % WARP_SIZE;\n unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;\n bool is_warp_head = lane_idx == 0;\n unsigned int reduction_size = block_dim.x;\n unsigned int num_of_warps = reduction_size / WARP_SIZE;\n unsigned int smem_offset = reduce_group_id * num_of_warps;\n\n block_sync::sync();\n\n if (read_write_pred && is_warp_head) {\n shared_mem[smem_offset + warp_idx] = reduce_val;\n }\n\n block_sync::sync();\n\n if (warp_idx == 0) {\n // This assumes num_of_warps will be < 32, meaning < 1024 threads.\n // Should be true for long enough.\n assert(num_of_warps <= 32);\n\n reduce_val = lane_idx < num_of_warps ? shared_mem[smem_offset + lane_idx]\n : init_val;\n\n // Reduce within warp 0\n for (int i = WARP_SIZE/2; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));\n }\n }\n\n if (is_warp_head) {\n reduction_op(out, reduce_val);\n }\n } else {\n reduction_op(out, reduce_val);\n }\n}\n\n} // namespace warp\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\nnamespace warp {\n\ntemplate <\n bool SINGLE_WARP,\n typename T,\n typename Func,\n typename _dim3ti,\n typename _dim3bd>\n__device__ void warpReduceTIDX(\n T& out,\n const T& inp_val,\n Func reduction_op,\n const _dim3ti& thread_idx,\n const _dim3bd& block_dim,\n T* shared_mem,\n bool read_write_pred,\n T init_val) {\n constexpr int WARP_SIZE = warpSize;\n\n // Assume input padded to multiples of a warp\n T reduce_val = init_val;\n\n // Do warp reduction\n if (read_write_pred) {\n reduce_val = inp_val;\n }\n\n // Reduce within each warp\n for (int i = WARP_SIZE/2; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));\n }\n\n // Reduce across warp if needed\n // Load value to shared mem\n if (!SINGLE_WARP) {\n unsigned int warp_idx = thread_idx.x / WARP_SIZE;\n unsigned int lane_idx = thread_idx.x % WARP_SIZE;\n unsigned int reduce_group_id = thread_idx.z * block_dim.y + thread_idx.y;\n bool is_warp_head = lane_idx == 0;\n unsigned int reduction_size = block_dim.x;\n unsigned int num_of_warps = reduction_size / WARP_SIZE;\n unsigned int smem_offset = reduce_group_id * num_of_warps;\n\n block_sync::sync();\n\n if (read_write_pred && is_warp_head) {\n shared_mem[smem_offset + warp_idx] = reduce_val;\n }\n\n block_sync::sync();\n\n if (warp_idx == 0) {\n // This assumes num_of_warps will be < 32, meaning < 1024 threads.\n // Should be true for long enough.\n assert(num_of_warps <= 32);\n\n reduce_val = lane_idx < num_of_warps ? shared_mem[smem_offset + lane_idx]\n : init_val;\n\n // Reduce within warp 0\n for (int i = WARP_SIZE/2; i >= 1; i /= 2) {\n reduction_op(\n reduce_val, __shfl_xor(reduce_val, i, WARP_SIZE));\n }\n }\n\n if (is_warp_head) {\n reduction_op(out, reduce_val);\n }\n } else {\n reduction_op(out, reduce_val);\n }\n}\n\n} // namespace warp\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_CUDA char bessel_j1_name[] = \"bessel_j1_forward\";\n\nvoid bessel_j1_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j1_cuda\", [&]() {\n jitted_gpu_kernel(iterator, bessel_j1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j1_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_j1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(special_bessel_j1_stub, &bessel_j1_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\nnamespace {\nCONSTEXPR_EXCEPT_WIN_HIP char bessel_j1_name[] = \"bessel_j1_forward\";\n\nvoid bessel_j1_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j1_hip\", [&]() {\n jitted_gpu_kernel(iterator, bessel_j1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_j1_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_j1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(special_bessel_j1_stub, &bessel_j1_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char bessel_y0_name[] = \"bessel_y0_forward\";\n\n void bessel_y0_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y0_cuda\", [&]() {\n jitted_gpu_kernel(iterator, bessel_y0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y0_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_y0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char bessel_y0_name[] = \"bessel_y0_forward\";\n\n void bessel_y0_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y0_hip\", [&]() {\n jitted_gpu_kernel(iterator, bessel_y0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y0_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_y0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_bessel_y0_stub, &bessel_y0_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char bessel_y1_name[] = \"bessel_y1_forward\";\n\n void bessel_y1_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y1_cuda\", [&]() {\n jitted_gpu_kernel(iterator, bessel_y1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y1_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_y1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char bessel_y1_name[] = \"bessel_y1_forward\";\n\n void bessel_y1_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y1_hip\", [&]() {\n jitted_gpu_kernel(iterator, bessel_y1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"bessel_y1_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return bessel_y1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\ntemplate\nstruct BitwiseAndFunctor {\n __device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {\n return a & b;\n }\n};\n\ntemplate<>\nstruct BitwiseAndFunctor {\n __device__ __forceinline__ bool operator()(bool a, bool b) const {\n return a && b;\n }\n};\n\nvoid bitwise_and_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), \"bitwise_and_cuda\", [&]() {\n BitwiseAndFunctor f;\n opmath_symmetric_gpu_kernel_with_scalars(iter, f);\n });\n}\n\ntemplate\nstruct BitwiseOrFunctor {\n __device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {\n return a | b;\n }\n};\n\ntemplate<>\nstruct BitwiseOrFunctor {\n __device__ __forceinline__ bool operator()(bool a, bool b) const {\n return a || b;\n }\n};\n\nvoid bitwise_or_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), \"bitwise_or_cuda\", [&]() {\n BitwiseOrFunctor f;\n opmath_symmetric_gpu_kernel_with_scalars(iter, f);\n });\n}\n\ntemplate\nstruct BitwiseXorFunctor {\n __device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {\n return a ^ b;\n }\n};\n\ntemplate<>\nstruct BitwiseXorFunctor {\n __device__ __forceinline__ bool operator()(bool a, bool b) const {\n return a != b;\n }\n};\n\nvoid bitwise_xor_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), \"bitwise_xor_cuda\", [&]() {\n BitwiseXorFunctor f;\n opmath_symmetric_gpu_kernel_with_scalars(iter, f);\n });\n}\n\nREGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);\nREGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);\nREGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);\n\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\ntemplate\nstruct BitwiseAndFunctor {\n __device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {\n return a & b;\n }\n};\n\ntemplate<>\nstruct BitwiseAndFunctor {\n __device__ __forceinline__ bool operator()(bool a, bool b) const {\n return a && b;\n }\n};\n\nvoid bitwise_and_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), \"bitwise_and_hip\", [&]() {\n BitwiseAndFunctor f;\n opmath_symmetric_gpu_kernel_with_scalars(iter, f);\n });\n}\n\ntemplate\nstruct BitwiseOrFunctor {\n __device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {\n return a | b;\n }\n};\n\ntemplate<>\nstruct BitwiseOrFunctor {\n __device__ __forceinline__ bool operator()(bool a, bool b) const {\n return a || b;\n }\n};\n\nvoid bitwise_or_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), \"bitwise_or_hip\", [&]() {\n BitwiseOrFunctor f;\n opmath_symmetric_gpu_kernel_with_scalars(iter, f);\n });\n}\n\ntemplate\nstruct BitwiseXorFunctor {\n __device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {\n return a ^ b;\n }\n};\n\ntemplate<>\nstruct BitwiseXorFunctor {\n __device__ __forceinline__ bool operator()(bool a, bool b) const {\n return a != b;\n }\n};\n\nvoid bitwise_xor_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), \"bitwise_xor_hip\", [&]() {\n BitwiseXorFunctor f;\n opmath_symmetric_gpu_kernel_with_scalars(iter, f);\n });\n}\n\nREGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_hip);\nREGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_hip);\nREGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_hip);\n\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nnamespace at::native {\nnamespace binary_internal {\nvoid div_floor_kernel_cuda(TensorIteratorBase& iter) {\n \n const auto dtype = iter.common_dtype();\n if (dtype == kByte) {\n \n \n \n return div_trunc_kernel_cuda(iter);\n } else if (isIntegralType(dtype, false)) {\n AT_DISPATCH_INTEGRAL_TYPES(dtype, \"div_floor_cuda\", [&]() {\n gpu_kernel_with_scalars(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return div_floor_integer(a, b);\n });\n });\n } else if (iter.is_cpu_scalar(2)) {\n \n \n \n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, dtype, \"div_floor_cuda\", [&]() {\n using accscalar_t = at::acc_type;\n auto b = iter.scalar_value(2);\n if (C10_UNLIKELY(b == 0)) {\n return div_true_kernel_cuda(iter);\n }\n auto inv_b = accscalar_t(1.0) / b;\n iter.remove_operand(2);\n gpu_kernel(iter, [b, inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {\n auto mod = std::fmod(a, b);\n auto div = (a - mod) * inv_b;\n if ((mod != 0) && (b < 0) != (mod < 0)) {\n div -= scalar_t(1);\n }\n scalar_t floordiv;\n if (div != 0) {\n floordiv = std::floor(div);\n if (div - floordiv > scalar_t(0.5)) {\n floordiv += scalar_t(1.0);\n }\n } else {\n floordiv = c10::cuda::compat::copysign(scalar_t(0), a * inv_b);\n }\n return floordiv;\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, 
dtype, \"div_floor_cuda\", [&]() {\n gpu_kernel_with_scalars(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return div_floor_floating(a, b);\n });\n });\n }\n}\n} \nREGISTER_DISPATCH(div_floor_stub, &binary_internal::div_floor_kernel_cuda);\n} \n\n###", "hip": " \n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nnamespace at::native {\nnamespace binary_internal {\nvoid div_floor_kernel_hip(TensorIteratorBase& iter) {\n \n const auto dtype = iter.common_dtype();\n if (dtype == kByte) {\n \n \n \n return div_trunc_kernel_hip(iter);\n } else if (isIntegralType(dtype, false)) {\n AT_DISPATCH_INTEGRAL_TYPES(dtype, \"div_floor_hip\", [&]() {\n gpu_kernel_with_scalars(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return div_floor_integer(a, b);\n });\n });\n } else if (iter.is_cpu_scalar(2)) {\n \n \n \n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, dtype, \"div_floor_hip\", [&]() {\n using accscalar_t = at::acc_type;\n auto b = iter.scalar_value(2);\n if (C10_UNLIKELY(b == 0)) {\n return div_true_kernel_hip(iter);\n }\n auto inv_b = accscalar_t(1.0) / b;\n iter.remove_operand(2);\n gpu_kernel(iter, [b, inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {\n auto mod = ::fmod(a, b);\n auto div = (a - mod) * inv_b;\n if ((mod != 0) && (b < 0) != (mod < 0)) {\n div -= scalar_t(1);\n }\n scalar_t floordiv;\n if (div != 0) {\n floordiv = ::floor(div);\n if (div - floordiv > scalar_t(0.5)) {\n floordiv += scalar_t(1.0);\n }\n } else {\n floordiv = c10::hip::compat::copysign(scalar_t(0), a * inv_b);\n }\n return floordiv;\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, dtype, \"div_floor_hip\", [&]() {\n gpu_kernel_with_scalars(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return div_floor_floating(a, b);\n });\n });\n }\n}\n} \nREGISTER_DISPATCH(div_floor_stub, &binary_internal::div_floor_kernel_hip);\n} ###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nnamespace at::native {\nnamespace binary_internal {\n\nCONSTEXPR_EXCEPT_WIN_CUDA char div_name[] = \"div_kernel\";\nvoid div_true_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (iter.common_dtype() == kComplexHalf) {\n using scalar_t = c10::complex;\n#if AT_USE_JITERATOR()\n static const auto div_string = jiterator_stringify(\n template T div_kernel(T a, T b) { return a / b; });\n opmath_jitted_gpu_kernel_with_scalars(\n iter, div_string);\n#else\n using opmath_t = at::opmath_type;\n opmath_gpu_kernel_with_scalars(iter, DivFunctor());\n#endif\n return;\n }\n if (iter.is_cpu_scalar(2)) {\n // optimization for floating-point types: if the second operand is a CPU\n // scalar, compute a * reciprocal(b). 
Note that this may lose one bit of\n // precision compared to computing the division.\n AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(\n kHalf, kBFloat16, common_dtype, \"div_true_cuda\", [&]() {\n using opmath_t = at::opmath_type;\n auto inv_b = opmath_t(1.0) / iter.scalar_value(2);\n iter.remove_operand(2);\n gpu_kernel(\n iter,\n BUnaryFunctor>(\n MulFunctor(), inv_b));\n });\n } else {\n AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(\n kHalf, kBFloat16, common_dtype, \"div_true_cuda\", [&]() {\n DivFunctor f;\n gpu_kernel_with_scalars(iter, f);\n });\n }\n}\n} // namespace binary_internal\n\nREGISTER_DISPATCH(div_true_stub, &binary_internal::div_true_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nnamespace at::native {\nnamespace binary_internal {\n\nCONSTEXPR_EXCEPT_WIN_HIP char div_name[] = \"div_kernel\";\nvoid div_true_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (iter.common_dtype() == kComplexHalf) {\n using scalar_t = c10::complex;\n#if AT_USE_JITERATOR()\n static const auto div_string = jiterator_stringify(\n template T div_kernel(T a, T b) { return a / b; });\n opmath_jitted_gpu_kernel_with_scalars(\n iter, div_string);\n#else\n using opmath_t = at::opmath_type;\n opmath_gpu_kernel_with_scalars(iter, DivFunctor());\n#endif\n return;\n }\n if (iter.is_cpu_scalar(2)) {\n // optimization for floating-point types: if the second operand is a CPU\n // scalar, compute a * reciprocal(b). Note that this may lose one bit of\n // precision compared to computing the division.\n AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(\n kHalf, kBFloat16, common_dtype, \"div_true_hip\", [&]() {\n using opmath_t = at::opmath_type;\n auto inv_b = opmath_t(1.0) / iter.scalar_value(2);\n iter.remove_operand(2);\n gpu_kernel(\n iter,\n BUnaryFunctor>(\n MulFunctor(), inv_b));\n });\n } else {\n AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(\n kHalf, kBFloat16, common_dtype, \"div_true_hip\", [&]() {\n DivFunctor f;\n gpu_kernel_with_scalars(iter, f);\n });\n }\n}\n} // namespace binary_internal\n\nREGISTER_DISPATCH(div_true_stub, &binary_internal::div_true_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nnamespace at::native {\nnamespace binary_internal {\n\nvoid div_trunc_kernel_cuda(TensorIteratorBase& iter) {\n auto dtype = iter.common_dtype();\n if (isIntegralType(dtype, /*includeBool*/ false)) {\n AT_DISPATCH_INTEGRAL_TYPES(dtype, \"div_trunc_cuda\", [&]() {\n gpu_kernel_with_scalars(\n iter,\n [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a / b; });\n });\n } else if (iter.is_cpu_scalar(2)) {\n // optimization for floating-point types: if the second operand is a CPU\n // scalar, compute a * reciprocal(b). 
Note that this may lose one bit of\n // precision compared to computing the division.\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, dtype, \"div_trunc_cuda\", [&]() {\n using accscalar_t = at::acc_type;\n auto inv_b = accscalar_t(1.0) / iter.scalar_value(2);\n iter.remove_operand(2);\n gpu_kernel(iter, [inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return std::trunc(a * inv_b);\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, dtype, \"div_trunc_cuda\", [&]() {\n gpu_kernel_with_scalars(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return std::trunc(a / b);\n });\n });\n }\n}\n} // namespace binary_internal\n\nREGISTER_DISPATCH(div_trunc_stub, &binary_internal::div_trunc_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\nnamespace at::native {\nnamespace binary_internal {\n\nvoid div_trunc_kernel_hip(TensorIteratorBase& iter) {\n auto dtype = iter.common_dtype();\n if (isIntegralType(dtype, /*includeBool*/ false)) {\n AT_DISPATCH_INTEGRAL_TYPES(dtype, \"div_trunc_hip\", [&]() {\n gpu_kernel_with_scalars(\n iter,\n [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a / b; });\n });\n } else if (iter.is_cpu_scalar(2)) {\n // optimization for floating-point types: if the second operand is a CPU\n // scalar, compute a * reciprocal(b). Note that this may lose one bit of\n // precision compared to computing the division.\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, dtype, \"div_trunc_hip\", [&]() {\n using accscalar_t = at::acc_type;\n auto inv_b = accscalar_t(1.0) / iter.scalar_value(2);\n iter.remove_operand(2);\n gpu_kernel(iter, [inv_b] GPU_LAMBDA(scalar_t a) -> scalar_t {\n return std::trunc(a * inv_b);\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n kHalf, kBFloat16, dtype, \"div_trunc_hip\", [&]() {\n gpu_kernel_with_scalars(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return std::trunc(a / b);\n });\n });\n }\n}\n} // namespace binary_internal\n\nREGISTER_DISPATCH(div_trunc_stub, &binary_internal::div_trunc_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid atan2_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half, at::ScalarType::BFloat16,\n iter.common_dtype(), \"atan2_cuda\",\n [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return ::atan2(a, b);\n });\n });\n}\n\nvoid hypot_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half, at::ScalarType::BFloat16,\n iter.common_dtype(), \"hypot_cuda\",\n [&]() {\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return ::hypot(a, b);\n });\n });\n}\n\nREGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda);\nREGISTER_DISPATCH(hypot_stub, &hypot_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid atan2_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half, at::ScalarType::BFloat16,\n iter.common_dtype(), \"atan2_hip\",\n [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return ::atan2(a, b);\n });\n });\n}\n\nvoid hypot_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n at::ScalarType::Half, at::ScalarType::BFloat16,\n iter.common_dtype(), \"hypot_hip\",\n [&]() {\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return ::hypot(a, b);\n });\n });\n}\n\nREGISTER_DISPATCH(atan2_stub, &atan2_kernel_hip);\nREGISTER_DISPATCH(hypot_stub, &hypot_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nCONSTEXPR_EXCEPT_WIN_CUDA char mul_name[] = \"mul_kernel\";\nvoid mul_kernel_cuda(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (common_dtype == kComplexHalf) {\n using scalar_t = c10::complex;\n#if AT_USE_JITERATOR()\n static const auto mul_string = jiterator_stringify(\n template T mul_kernel(T a, T b) { return a * b; });\n opmath_jitted_gpu_kernel_with_scalars(\n iter, mul_string);\n#else\n using opmath_t = at::opmath_type;\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, binary_internal::MulFunctor());\n#endif\n } else {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(\n kHalf, kBFloat16, kBool, iter.common_dtype(), \"mul_cuda\", [&]() {\n using opmath_t = at::opmath_type;\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, binary_internal::MulFunctor());\n });\n }\n}\n\nREGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nCONSTEXPR_EXCEPT_WIN_HIP char mul_name[] = \"mul_kernel\";\nvoid mul_kernel_hip(TensorIteratorBase& iter) {\n auto common_dtype = iter.common_dtype();\n if (common_dtype == kComplexHalf) {\n using scalar_t = c10::complex;\n#if AT_USE_JITERATOR()\n static const auto mul_string = jiterator_stringify(\n template T mul_kernel(T a, T b) { return a * b; });\n opmath_jitted_gpu_kernel_with_scalars(\n iter, mul_string);\n#else\n using opmath_t = at::opmath_type;\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, binary_internal::MulFunctor());\n#endif\n } else {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(\n kHalf, kBFloat16, kBool, iter.common_dtype(), \"mul_hip\", [&]() {\n using opmath_t = at::opmath_type;\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, binary_internal::MulFunctor());\n });\n }\n}\n\nREGISTER_DISPATCH(mul_stub, &mul_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\nnamespace at {\nnamespace cuda {\nnamespace cub {\n\nnamespace {\ntemplate \nstruct SumOp {\n __device__ scalar_t operator () (scalar_t a, scalar_t b) const {\n return a + b;\n }\n};\n}\n\ntemplate \nvoid inclusive_sum_truncating(const input_t *input, output_t *output, int64_t num_items) {\n using NO_ROCM(at_cuda_detail)::cub::Sum;\n inclusive_scan(input, output, Sum{}, num_items);\n}\n\ntemplate void inclusive_sum_truncating(const int32_t *input, int32_t *output, int64_t num_items);\ntemplate void inclusive_sum_truncating(const int64_t *input, int64_t *output, int64_t num_items);\ntemplate void inclusive_sum_truncating(const int32_t *input, int64_t *output, int64_t num_items);\n\ntemplate \nvoid exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t num_items) {\n using scalar_t = std::common_type_t;\n exclusive_scan(input, output, SumOp{}, scalar_t(0), num_items);\n}\n\ntemplate void exclusive_sum_in_common_type(const int32_t *input, int32_t *output, int64_t num_items);\ntemplate void exclusive_sum_in_common_type(const int64_t *input, int64_t *output, int64_t num_items);\n\nnamespace {\nstruct CountMaskOp {\n __device__ int64_t operator() (const uint8_t &x) const {\n return x != 0;\n }\n};\n}\n\nvoid mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n) {\n CountMaskOp op{};\n auto iter = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator<\n bool, decltype(op), decltype(mask)>(mask, op);\n exclusive_scan(iter, output_idx, SumOp{}, int64_t{0}, n);\n}\n\n}}} // namespace at::cuda::cub\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\nnamespace at {\nnamespace hip {\nnamespace cub {\n\nnamespace {\ntemplate \nstruct SumOp {\n __device__ scalar_t operator () (scalar_t a, scalar_t b) const {\n return a + b;\n }\n};\n}\n\ntemplate \nvoid inclusive_sum_truncating(const input_t *input, output_t *output, int64_t num_items) {\n using NO_ROCM(at_hip_detail)::hipcub::Sum;\n inclusive_scan(input, output, Sum{}, num_items);\n}\n\ntemplate void inclusive_sum_truncating(const int32_t *input, int32_t *output, int64_t num_items);\ntemplate void inclusive_sum_truncating(const int64_t *input, int64_t *output, int64_t num_items);\ntemplate void inclusive_sum_truncating(const int32_t *input, int64_t *output, int64_t num_items);\n\ntemplate \nvoid exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t num_items) {\n using scalar_t = std::common_type_t;\n exclusive_scan(input, output, SumOp{}, scalar_t(0), num_items);\n}\n\ntemplate void exclusive_sum_in_common_type(const int32_t *input, int32_t *output, int64_t num_items);\ntemplate void exclusive_sum_in_common_type(const int64_t *input, int64_t *output, int64_t num_items);\n\nnamespace {\nstruct CountMaskOp {\n __device__ int64_t operator() (const uint8_t &x) const {\n return x != 0;\n }\n};\n}\n\nvoid mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n) {\n CountMaskOp op{};\n auto iter = NO_ROCM(at_hip_detail)::hipcub::TransformInputIterator<\n bool, decltype(op), decltype(mask)>(mask, op);\n exclusive_scan(iter, output_idx, SumOp{}, int64_t{0}, n);\n}\n\n}}} // namespace at::cuda::cub\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid remainder_kernel_cuda(TensorIteratorBase& iter) {\n if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"remainder_cuda\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n scalar_t r = a % b;\n if (r != 0 && c10::signs_differ(r, b)) {\n r += b;\n }\n return r;\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), \"remainder_cuda\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {\n auto mod = ::fmod(a, b);\n if (mod != 0 && c10::signs_differ(b, mod)) {\n mod += b;\n }\n return mod;\n });\n });\n }\n}\n\nvoid fmod_kernel_cuda(TensorIteratorBase& iter) {\n if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"fmod_cuda\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return a % b;\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), \"fmod_cuda\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {\n return ::fmod(a, b);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);\nREGISTER_DISPATCH(fmod_stub, &fmod_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid remainder_kernel_hip(TensorIteratorBase& iter) {\n if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"remainder_hip\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n scalar_t r = a % b;\n if (r != 0 && c10::signs_differ(r, b)) {\n r += b;\n }\n return r;\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), \"remainder_hip\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {\n auto mod = ::fmod(a, b);\n if (mod != 0 && c10::signs_differ(b, mod)) {\n mod += b;\n }\n return mod;\n });\n });\n }\n}\n\nvoid fmod_kernel_hip(TensorIteratorBase& iter) {\n if (isIntegralType(iter.common_dtype(), /*includeBool*/ false)) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"fmod_hip\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return a % b;\n });\n });\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), \"fmod_hip\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {\n return ::fmod(a, b);\n });\n });\n }\n}\n\nREGISTER_DISPATCH(remainder_stub, &remainder_kernel_hip);\nREGISTER_DISPATCH(fmod_stub, &fmod_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\n\nvoid lshift_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), \"lshift_cuda\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return static_cast>(a) << b;\n });\n });\n}\n\nvoid rshift_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), \"rshift_cuda\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return a >> b;\n });\n });\n}\n\nREGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);\nREGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\n\nvoid lshift_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), \"lshift_hip\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return static_cast>(a) << b;\n });\n });\n}\n\nvoid rshift_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), \"rshift_hip\", [&]() {\n gpu_kernel_with_scalars(iter,\n []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return a >> b;\n });\n });\n}\n\nREGISTER_DISPATCH(lshift_stub, &lshift_kernel_hip);\nREGISTER_DISPATCH(rshift_stub, &rshift_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n#include \n#include \n#include \nnamespace at {\nnamespace native {\nnamespace cuda_utils {\nconstexpr int kCUDABlockReduceNumThreads = 512;\n\n\n\n\nconstexpr int kCUDABlockReduceMaxThreads = C10_WARP_SIZE * C10_WARP_SIZE;\n\n\n\n\ntemplate \n__inline__ __device__ T WarpReduceSum(T val) {\n#pragma unroll\n for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {\n val += WARP_SHFL_DOWN(val, offset);\n }\n return val;\n}\nstruct Block1D {\n static __forceinline__ __device__ int Tid() { return threadIdx.x; }\n static __forceinline__ __device__ int Warps() {\n return blockDim.x / C10_WARP_SIZE;\n }\n};\nstruct Block2D {\n static __forceinline__ __device__ int Tid() {\n return threadIdx.x + threadIdx.y * blockDim.x;\n }\n static __forceinline__ __device__ int Warps() {\n return blockDim.x * blockDim.y / C10_WARP_SIZE;\n }\n};\n\n\n\n\n\n\n\ntemplate \n__inline__ __device__ T BlockReduceSum(T val, T* shared) {\n const int tid = B::Tid();\n const int lid = tid % C10_WARP_SIZE;\n const int wid = tid / C10_WARP_SIZE;\n val = WarpReduceSum(val);\n __syncthreads(); \n if (lid == 0) {\n shared[wid] = val;\n }\n __syncthreads();\n val = (tid < B::Warps()) ? shared[lid] : T(0);\n if (wid == 0) {\n val = WarpReduceSum(val);\n }\n return val;\n}\ntemplate \n__inline__ __device__ T WarpReduce(T val, const ReduceOp& op) {\n#pragma unroll\n for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {\n val = op.combine(val, op.warp_shfl_down(val, offset));\n }\n return val;\n}\ntemplate \n__inline__ __device__ T\nBlockReduce(T val, const ReduceOp& op, const T& identity_element, T* shared) {\n const int tid = B::Tid();\n const int lid = tid % C10_WARP_SIZE;\n const int wid = tid / C10_WARP_SIZE;\n val = WarpReduce(val, op);\n __syncthreads(); \n if (lid == 0) {\n shared[wid] = val;\n }\n __syncthreads();\n val = (tid < B::Warps()) ? 
shared[lid] : identity_element;\n if (wid == 0) {\n val = WarpReduce(val, op);\n }\n return val;\n}\n} \n} \n} \n\n###", "hip": " \n#include \"hip/hip_runtime.h\"\n#pragma once\n#include \n#include \n#include \nnamespace at {\nnamespace native {\nnamespace hip_utils {\nconstexpr int kHIPBlockReduceNumThreads = 512;\n\n\n\n\nconstexpr int kHIPBlockReduceMaxThreads = C10_WARP_SIZE * C10_WARP_SIZE;\n\n\n\n\ntemplate \n__inline__ __device__ T WarpReduceSum(T val) {\n#pragma unroll\n for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {\n val += WARP_SHFL_DOWN(val, offset);\n }\n return val;\n}\nstruct Block1D {\n static __forceinline__ __device__ int Tid() { return threadIdx.x; }\n static __forceinline__ __device__ int Warps() {\n return blockDim.x / C10_WARP_SIZE;\n }\n};\nstruct Block2D {\n static __forceinline__ __device__ int Tid() {\n return threadIdx.x + threadIdx.y * blockDim.x;\n }\n static __forceinline__ __device__ int Warps() {\n return blockDim.x * blockDim.y / C10_WARP_SIZE;\n }\n};\n\n\n\n\n\n\n\ntemplate \n__inline__ __device__ T BlockReduceSum(T val, T* shared) {\n const int tid = B::Tid();\n const int lid = tid % C10_WARP_SIZE;\n const int wid = tid / C10_WARP_SIZE;\n val = WarpReduceSum(val);\n __syncthreads(); \n if (lid == 0) {\n shared[wid] = val;\n }\n __syncthreads();\n val = (tid < B::Warps()) ? shared[lid] : T(0);\n if (wid == 0) {\n val = WarpReduceSum(val);\n }\n return val;\n}\ntemplate \n__inline__ __device__ T WarpReduce(T val, const ReduceOp& op) {\n#pragma unroll\n for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {\n val = op.combine(val, op.warp_shfl_down(val, offset));\n }\n return val;\n}\ntemplate \n__inline__ __device__ T\nBlockReduce(T val, const ReduceOp& op, const T& identity_element, T* shared) {\n const int tid = B::Tid();\n const int lid = tid % C10_WARP_SIZE;\n const int wid = tid / C10_WARP_SIZE;\n val = WarpReduce(val, op);\n __syncthreads(); \n if (lid == 0) {\n shared[wid] = val;\n }\n __syncthreads();\n val = (tid < B::Warps()) ? shared[lid] : identity_element;\n if (wid == 0) {\n val = WarpReduce(val, op);\n }\n return val;\n}\n} \n} \n} ###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_t_name[] = \"chebyshev_polynomial_t_forward\";\n\n void chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_t_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_t_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_t_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_t_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_t_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_t_name[] = \"chebyshev_polynomial_t_forward\";\n\n void chebyshev_polynomial_t_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_t_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_t_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_t_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_t_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_t_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_t_stub, &chebyshev_polynomial_t_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_u_name[] = \"chebyshev_polynomial_u_forward\";\n\n void chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_u_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_u_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_u_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_u_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_u_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_u_stub, &chebyshev_polynomial_u_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_u_name[] = \"chebyshev_polynomial_u_forward\";\n\n void chebyshev_polynomial_u_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_u_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_u_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_u_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_u_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_u_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_u_stub, &chebyshev_polynomial_u_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_v_name[] = \"chebyshev_polynomial_v_forward\";\n\n void chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_v_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_v_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_v_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_v_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_v_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_v_stub, &chebyshev_polynomial_v_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_v_name[] = \"chebyshev_polynomial_v_forward\";\n\n void chebyshev_polynomial_v_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_v_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_v_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_v_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_v_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_v_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_v_stub, &chebyshev_polynomial_v_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char chebyshev_polynomial_w_name[] = \"chebyshev_polynomial_w_forward\";\n\n void chebyshev_polynomial_w_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_w_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_w_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_w_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_w_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_w_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_w_stub, &chebyshev_polynomial_w_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char chebyshev_polynomial_w_name[] = \"chebyshev_polynomial_w_forward\";\n\n void chebyshev_polynomial_w_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_w_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, chebyshev_polynomial_w_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"chebyshev_polynomial_w_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return chebyshev_polynomial_w_forward(x, n);\n });\n });\n#endif\n } // chebyshev_polynomial_w_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(chebyshev_polynomial_w_stub, &chebyshev_polynomial_w_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native { namespace {\n\nenum class EqOpType {EQ, NE};\n\ntemplate\nstruct CompareEqFunctor{\n CompareEqFunctor(EqOpType op): op_(op) {}\n const EqOpType op_;\n __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {\n if (op_ == EqOpType::EQ) {\n return a == b;\n } else { //NE\n return a != b;\n }\n\n }\n };\n}\n\nC10_NOINLINE void compare_eq_ne_kernel(TensorIteratorBase &iter, EqOpType op) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBFloat16, kBool,\n iter.common_dtype(), \"compare_eq_ne_cuda\", [&]() {\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, CompareEqFunctor(op));\n });\n}\n\nvoid eq_kernel_cuda(TensorIteratorBase& iter) {\n compare_eq_ne_kernel(iter, EqOpType::EQ);\n}\n\nvoid ne_kernel_cuda(TensorIteratorBase& iter) {\n compare_eq_ne_kernel(iter, EqOpType::NE);\n}\n\nREGISTER_DISPATCH(eq_stub, &eq_kernel_cuda);\nREGISTER_DISPATCH(ne_stub, &ne_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native { namespace {\n\nenum class EqOpType {EQ, NE};\n\ntemplate\nstruct CompareEqFunctor{\n CompareEqFunctor(EqOpType op): op_(op) {}\n const EqOpType op_;\n __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {\n if (op_ == EqOpType::EQ) {\n return a == b;\n } else { //NE\n return a != b;\n }\n\n }\n };\n}\n\nC10_NOINLINE void compare_eq_ne_kernel(TensorIteratorBase &iter, EqOpType op) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kHalf, kBFloat16, kBool,\n iter.common_dtype(), \"compare_eq_ne_hip\", [&]() {\n opmath_symmetric_gpu_kernel_with_scalars(\n iter, CompareEqFunctor(op));\n });\n}\n\nvoid eq_kernel_hip(TensorIteratorBase& iter) {\n compare_eq_ne_kernel(iter, EqOpType::EQ);\n}\n\nvoid ne_kernel_hip(TensorIteratorBase& iter) {\n compare_eq_ne_kernel(iter, EqOpType::NE);\n}\n\nREGISTER_DISPATCH(eq_stub, &eq_kernel_hip);\nREGISTER_DISPATCH(ne_stub, &ne_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\nnamespace {\n\nvoid complex_kernel_cuda(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.input_dtype(0), \"complex_cuda\", [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex {\n return c10::complex(a, b);\n });\n });\n}\n\nvoid polar_kernel_cuda(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES(iter.input_dtype(0), \"polar_cuda\", [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex {\n return c10::complex(a * std::cos(b), a * std::sin(b));\n });\n });\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(complex_stub, &complex_kernel_cuda);\nREGISTER_DISPATCH(polar_stub, &polar_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\nnamespace {\n\nvoid complex_kernel_hip(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND(kHalf, iter.input_dtype(0), \"complex_hip\", [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex {\n return c10::complex(a, b);\n });\n });\n}\n\nvoid polar_kernel_hip(TensorIterator& iter) {\n AT_DISPATCH_FLOATING_TYPES(iter.input_dtype(0), \"polar_hip\", [&]() {\n gpu_kernel(\n iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> c10::complex {\n return c10::complex(a * std::cos(b), a * std::sin(b));\n });\n });\n}\n\n} // anonymous namespace\n\nREGISTER_DISPATCH(complex_stub, &complex_kernel_hip);\nREGISTER_DISPATCH(polar_stub, &polar_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n#if defined(__CUDACC__)\n#include \n#include \n#include \n#elif defined(__HIPCC__)\n#include \n#include \n#include \n#endif\n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid copysign_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), \"copysign_cuda\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return c10::cuda::compat::copysign(a, b);\n });\n });\n}\n\nREGISTER_DISPATCH(copysign_stub, &copysign_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n#if defined(__HIPCC__)\n#include \n#include \n#include \n#elif defined(__HIPCC__)\n#include \n#include \n#include \n#endif\n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid copysign_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.common_dtype(), \"copysign_hip\", [&]() {\n gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {\n return c10::hip::compat::copysign(a, b);\n });\n });\n}\n\nREGISTER_DISPATCH(copysign_stub, &copysign_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n\n#if !defined(USE_ROCM)\n#include // for CUDA_VERSION\n#endif\n\n#if !defined(USE_ROCM)\n#include \n#else\n#define CUB_VERSION 0\n#endif\n\n// cub sort support for __nv_bfloat16 is added to cub 1.13 in:\n// https://github.com/NVIDIA/cub/pull/306\n#if CUB_VERSION >= 101300\n#define CUB_SUPPORTS_NV_BFLOAT16() true\n#else\n#define CUB_SUPPORTS_NV_BFLOAT16() false\n#endif\n\n// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:\n// https://github.com/NVIDIA/cub/pull/326\n// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake\n// starting from CUDA 11.5\n#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true\n#else\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false\n#endif\n\n// cub support for UniqueByKey is added to cub 1.16 in:\n// https://github.com/NVIDIA/cub/pull/405\n#if CUB_VERSION >= 101600\n#define CUB_SUPPORTS_UNIQUE_BY_KEY() true\n#else\n#define CUB_SUPPORTS_UNIQUE_BY_KEY() false\n#endif\n\n// cub support for scan by key is added to cub 1.15\n// in https://github.com/NVIDIA/cub/pull/376\n#if CUB_VERSION >= 101500\n#define CUB_SUPPORTS_SCAN_BY_KEY() 1\n#else\n#define CUB_SUPPORTS_SCAN_BY_KEY() 0\n#endif\n\n// cub support for cub::FutureValue is added to cub 1.15 in:\n// https://github.com/NVIDIA/cub/pull/305\n#if CUB_VERSION >= 101500\n#define CUB_SUPPORTS_FUTURE_VALUE() true\n#else\n#define CUB_SUPPORTS_FUTURE_VALUE() false\n#endif\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#pragma once\n\n#if !defined(USE_ROCM)\n#include // for TORCH_HIP_VERSION\n#endif\n\n#if !defined(USE_ROCM)\n#include \n#else\n#define CUB_VERSION 0\n#endif\n\n// cub sort support for __nv_bfloat16 is added to cub 1.13 in:\n// https://github.com/NVIDIA/cub/pull/306\n#if CUB_VERSION >= 101300\n#define CUB_SUPPORTS_NV_BFLOAT16() true\n#else\n#define CUB_SUPPORTS_NV_BFLOAT16() false\n#endif\n\n// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:\n// https://github.com/NVIDIA/cub/pull/326\n// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake\n// starting from HIP 11.5\n#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true\n#else\n#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false\n#endif\n\n// cub support for UniqueByKey is added to cub 1.16 in:\n// https://github.com/NVIDIA/cub/pull/405\n#if CUB_VERSION >= 101600\n#define CUB_SUPPORTS_UNIQUE_BY_KEY() true\n#else\n#define CUB_SUPPORTS_UNIQUE_BY_KEY() false\n#endif\n\n// cub support for scan by key is added to cub 1.15\n// in https://github.com/NVIDIA/cub/pull/376\n#if CUB_VERSION >= 101500\n#define CUB_SUPPORTS_SCAN_BY_KEY() 1\n#else\n#define CUB_SUPPORTS_SCAN_BY_KEY() 0\n#endif\n\n// cub support for hipcub::FutureValue is added to cub 1.15 in:\n// https://github.com/NVIDIA/cub/pull/305\n#if CUB_VERSION >= 101500\n#define CUB_SUPPORTS_FUTURE_VALUE() true\n#else\n#define CUB_SUPPORTS_FUTURE_VALUE() false\n#endif\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\n#include \n\nnamespace at::native {\n\nScalar _local_scalar_dense_cuda(const Tensor& self) {\n Scalar r;\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(\n kComplexHalf, kHalf, kBool, kBFloat16, self.scalar_type(), \"_local_scalar_dense_cuda\", [&] {\n scalar_t value;\n cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n at::cuda::memcpy_and_sync(&value, self.const_data_ptr(), sizeof(scalar_t), cudaMemcpyDeviceToHost, stream);\n r = Scalar(value);\n });\n return r;\n}\n\n} // at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\n#include \n\nnamespace at::native {\n\nScalar _local_scalar_dense_hip(const Tensor& self) {\n Scalar r;\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(\n kComplexHalf, kHalf, kBool, kBFloat16, self.scalar_type(), \"_local_scalar_dense_hip\", [&] {\n scalar_t value;\n hipStream_t stream = at::hip::getCurrentHIPStream();\n at::cuda::memcpy_and_sync(&value, self.const_data_ptr(), sizeof(scalar_t), hipMemcpyDeviceToHost, stream);\n r = Scalar(value);\n });\n return r;\n}\n\n} // at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n#include \n\n#include \n#include \n\nnamespace at::native {\n\nvoid launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,\n self.scalar_type(), \"cummax_cuda\", [&]() {\n scalar_t init = self.is_floating_point() ? 
(-1*std::numeric_limits::infinity()) : std::numeric_limits::lowest();\n scan_dim_with_indices(self, values, indices, dim, init, std::greater_equal());\n });\n}\n\nvoid launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,\n self.scalar_type(), \"cummin_cuda\", [&]() {\n scalar_t init = self.is_floating_point() ? std::numeric_limits::infinity() : std::numeric_limits::max();\n scan_dim_with_indices(self, values, indices, dim, init, std::less_equal());\n });\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n#include \n\n#include \n#include \n\nnamespace at::native {\n\nvoid launch_cummax_hip_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,\n self.scalar_type(), \"cummax_hip\", [&]() {\n scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits::infinity()) : std::numeric_limits::lowest();\n scan_dim_with_indices(self, values, indices, dim, init, std::greater_equal());\n });\n}\n\nvoid launch_cummin_hip_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16,\n self.scalar_type(), \"cummin_hip\", [&]() {\n scalar_t init = self.is_floating_point() ? std::numeric_limits::infinity() : std::numeric_limits::max();\n scan_dim_with_indices(self, values, indices, dim, init, std::less_equal());\n });\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n#include \n\nnamespace at::native {\n\nvoid launch_cumprod_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(\n ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), \"cumprod_cuda\", [&]() {\n scalar_t init = 1;\n scan_dim(\n self,\n result,\n dim,\n init,\n std::multiplies());\n });\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n#include \n\nnamespace at::native {\n\nvoid launch_cumprod_hip_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(\n ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), \"cumprod_hip\", [&]() {\n scalar_t init = 1;\n scan_dim(\n self,\n result,\n dim,\n init,\n std::multiplies());\n });\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n#include \n\nnamespace at::native {\n\nvoid launch_cumsum_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(\n ScalarType::Half, ScalarType::BFloat16,\n self.scalar_type(), \"cumsum_cuda\",\n [&]() {\n scalar_t init = 0;\n scan_dim(\n self,\n result,\n dim,\n init,\n std::plus());\n });\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n#include \n\nnamespace at::native {\n\nvoid launch_cumsum_hip_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(\n ScalarType::Half, ScalarType::BFloat16,\n self.scalar_type(), \"cumsum_hip\",\n [&]() {\n scalar_t init = 0;\n scan_dim(\n self,\n result,\n dim,\n init,\n std::plus());\n });\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n\nnamespace at { namespace native {\n#if defined(USE_ROCM)\n// take these out when ROCm implements std:: math functions\n#include \ntemplate \nstatic __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);\n\ntemplate <>\n__forceinline__ __device__ float device_sqrt(float val) {\n return ::sqrtf(val);\n}\n\ntemplate <>\n__forceinline__ __device__ double device_sqrt(double val) {\n return ::sqrt(val);\n}\n#else\ntemplate\n__forceinline__ __device__ double device_sqrt(scalar_t val) {\n return std::sqrt(val);\n}\n#endif\n}}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n\nnamespace at { namespace native {\n#if defined(USE_ROCM)\n// take these out when ROCm implements std:: math functions\n#include \ntemplate \nstatic __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);\n\ntemplate <>\n__forceinline__ __device__ float device_sqrt(float val) {\n return ::sqrtf(val);\n}\n\ntemplate <>\n__forceinline__ __device__ double device_sqrt(double val) {\n return ::sqrt(val);\n}\n#else\ntemplate\n__forceinline__ __device__ double device_sqrt(scalar_t val) {\n return std::sqrt(val);\n}\n#endif\n}}\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional gen_) {\n auto generator = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::bernoulli_kernel(self, p_, generator);\n}\n\nvoid bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional gen) {\n auto iter = TensorIterator::borrowing_nullary_op(self);\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::bernoulli_kernel(iter, p, generator);\n}\n\nREGISTER_DISPATCH(bernoulli_tensor_stub, &bernoulli_tensor_kernel);\nREGISTER_DISPATCH(bernoulli_scalar_stub, &bernoulli_scalar_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid bernoulli_tensor_kernel(const TensorBase &self, const TensorBase &p_, c10::optional gen_) {\n auto generator = get_generator_or_default(gen_, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::bernoulli_kernel(self, p_, generator);\n}\n\nvoid bernoulli_scalar_kernel(const TensorBase &self, double p, c10::optional gen) {\n auto iter = TensorIterator::borrowing_nullary_op(self);\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::bernoulli_kernel(iter, p, generator);\n}\n\nREGISTER_DISPATCH(bernoulli_tensor_stub, &bernoulli_tensor_kernel);\nREGISTER_DISPATCH(bernoulli_scalar_stub, &bernoulli_scalar_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);\n}\n\nREGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::cauchy_kernel(iter, median, sigma, generator);\n}\n\nREGISTER_DISPATCH(cauchy_stub, &cauchy_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::exponential_kernel(iter, lambda, generator);\n}\n\nREGISTER_DISPATCH(exponential_stub, &exponential_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::exponential_kernel(iter, lambda, generator);\n}\n\nREGISTER_DISPATCH(exponential_stub, &exponential_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::geometric_kernel(iter, p_, generator);\n}\n\nREGISTER_DISPATCH(geometric_stub, &geometric_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid geometric_kernel(TensorIteratorBase& iter, double p_, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::geometric_kernel(iter, p_, generator);\n}\n\nREGISTER_DISPATCH(geometric_stub, &geometric_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::log_normal_kernel(iter, mean, std, generator);\n}\n\nREGISTER_DISPATCH(log_normal_stub, &log_normal_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid log_normal_kernel(TensorIteratorBase& iter, double mean, double std, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::log_normal_kernel(iter, mean, std, generator);\n}\n\nREGISTER_DISPATCH(log_normal_stub, &log_normal_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n// c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten.\n// This file adds utils used by aten only.\n\nnamespace at {\nnamespace cuda {\n\nusing CaptureId_t = c10::cuda::CaptureId_t;\nusing CaptureStatus = c10::cuda::CaptureStatus;\n\n// Use this version where you don't want to create a CUDA context if none exists.\ninline CaptureStatus currentStreamCaptureStatus() {\n#if !defined(USE_ROCM) || ROCM_VERSION >= 50300\n // don't create a context if we don't have to\n if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) {\n return c10::cuda::currentStreamCaptureStatusMayInitCtx();\n } else {\n return CaptureStatus::None;\n }\n#else\n return CaptureStatus::None;\n#endif\n}\n\ninline void assertNotCapturing(std::string attempt) {\n auto status = currentStreamCaptureStatus();\n TORCH_CHECK(status == CaptureStatus::None,\n attempt,\n \" during CUDA graph capture. If you need this call to be captured, \"\n \"please file an issue. \"\n \"Current cudaStreamCaptureStatus: \",\n status);\n}\n\ninline void errorIfCapturingCudnnBenchmark(std::string version_specific) {\n auto status = currentStreamCaptureStatus();\n TORCH_CHECK(status == CaptureStatus::None,\n \"Current cudaStreamCaptureStatus: \",\n status,\n \"\\nCapturing \",\n version_specific,\n \"is prohibited. Possible causes of this error:\\n\"\n \"1. No warmup iterations occurred before capture.\\n\"\n \"2. The convolutions you're trying to capture use dynamic shapes, \"\n \"in which case capturing them is generally prohibited.\");\n}\n\n} // namespace cuda\n} // namespace at\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#pragma once\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n// c10/hip/HIPGraphsC10Utils.h has utils used by both c10 and aten.\n// This file adds utils used by aten only.\n\nnamespace at {\nnamespace hip {\n\nusing CaptureId_t = c10::hip::CaptureId_t;\nusing CaptureStatus = c10::hip::CaptureStatus;\n\n// Use this version where you don't want to create a HIP context if none exists.\ninline CaptureStatus currentStreamCaptureStatus() {\n#if !defined(USE_ROCM) || ROCM_VERSION >= 50300\n // don't create a context if we don't have to\n if (c10::hip::hasPrimaryContext(c10::hip::current_device())) {\n return c10::hip::currentStreamCaptureStatusMayInitCtx();\n } else {\n return CaptureStatus::None;\n }\n#else\n return CaptureStatus::None;\n#endif\n}\n\ninline void assertNotCapturing(std::string attempt) {\n auto status = currentStreamCaptureStatus();\n TORCH_CHECK(status == CaptureStatus::None,\n attempt,\n \" during HIP graph capture. If you need this call to be captured, \"\n \"please file an issue. \"\n \"Current hipStreamCaptureStatus: \",\n status);\n}\n\ninline void errorIfCapturingCudnnBenchmark(std::string version_specific) {\n auto status = currentStreamCaptureStatus();\n TORCH_CHECK(status == CaptureStatus::None,\n \"Current hipStreamCaptureStatus: \",\n status,\n \"\\nCapturing \",\n version_specific,\n \"is prohibited. Possible causes of this error:\\n\"\n \"1. No warmup iterations occurred before capture.\\n\"\n \"2. The convolutions you're trying to capture use dynamic shapes, \"\n \"in which case capturing them is generally prohibited.\");\n}\n\n} // namespace hip\n} // namespace at\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid normal_kernel(const TensorBase &self, double mean, double std, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::normal_kernel(self, mean, std, generator);\n}\n\nREGISTER_DISPATCH(normal_stub, &normal_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid normal_kernel(const TensorBase &self, double mean, double std, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::normal_kernel(self, mean, std, generator);\n}\n\nREGISTER_DISPATCH(normal_stub, &normal_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen_) {\n auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);\n}\n\nvoid random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional gen_) {\n auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);\n}\n\nvoid random_kernel(TensorIteratorBase& iter, c10::optional gen_) {\n auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultCUDAGenerator());\n at::native::templates::cuda::random_kernel(iter, gen);\n}\n\nREGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);\nREGISTER_DISPATCH(random_stub, &random_kernel);\nREGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen_) {\n auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::random_from_to_kernel(iter, range, base, gen);\n}\n\nvoid random_full_64_bits_range_kernel(TensorIteratorBase& iter, c10::optional gen_) {\n auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::random_full_64_bits_range_kernel(iter, gen);\n}\n\nvoid random_kernel(TensorIteratorBase& iter, c10::optional gen_) {\n auto gen = get_generator_or_default(gen_, cuda::detail::getDefaultHIPGenerator());\n at::native::templates::cuda::random_kernel(iter, gen);\n}\n\nREGISTER_DISPATCH(random_from_to_stub, &random_from_to_kernel);\nREGISTER_DISPATCH(random_stub, &random_kernel);\nREGISTER_DISPATCH(random_full_64_bits_range_stub, &random_full_64_bits_range_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator());\n templates::cuda::uniform_kernel(iter, from, to, generator);\n}\n\nREGISTER_DISPATCH(uniform_stub, &uniform_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid uniform_kernel(TensorIteratorBase& iter, double from, double to, c10::optional gen) {\n auto generator = get_generator_or_default(gen, cuda::detail::getDefaultHIPGenerator());\n templates::cuda::uniform_kernel(iter, from, to, generator);\n}\n\nREGISTER_DISPATCH(uniform_stub, &uniform_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n#include \n#include \n#include \n#include \n\nnamespace at {\nnamespace native {\n\nTensor embedding_backward_cuda_kernel(\n const Tensor &grad,\n const Tensor &orig_indices,\n const Tensor &sorted_indices,\n const Tensor &count,\n int64_t num_weights,\n int padding_idx = -1,\n bool mode_mean = false,\n const Tensor &offset2bag = Tensor(),\n const Tensor &bag_size = Tensor(),\n const Tensor &per_sample_weights = Tensor());\n\n}}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n#include \n#include \n#include \n#include \n\nnamespace at {\nnamespace native {\n\nTensor embedding_backward_hip_kernel(\n const Tensor &grad,\n const Tensor &orig_indices,\n const Tensor &sorted_indices,\n const Tensor &count,\n int64_t num_weights,\n int padding_idx = -1,\n bool mode_mean = false,\n const Tensor &offset2bag = Tensor(),\n const Tensor &bag_size = Tensor(),\n const Tensor &per_sample_weights = Tensor());\n\n}}\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate\nstruct FillFunctor {\n FillFunctor(scalar_t v): value(v) {}\n __device__ __forceinline__ scalar_t operator() () const {\n return value;\n }\n private:\n scalar_t value;\n};\n\nvoid fill_kernel_cuda(TensorIterator& iter, const Scalar& value) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kBool, kHalf, kBFloat16, iter.dtype(), \"fill_cuda\", [&]() {\n gpu_kernel(iter, FillFunctor(value.to()));\n });\n}\n\nREGISTER_DISPATCH(fill_stub, &fill_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate\nstruct FillFunctor {\n FillFunctor(scalar_t v): value(v) {}\n __device__ __forceinline__ scalar_t operator() () const {\n return value;\n }\n private:\n scalar_t value;\n};\n\nvoid fill_kernel_hip(TensorIterator& iter, const Scalar& value) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(kComplexHalf, kBool, kHalf, kBFloat16, iter.dtype(), \"fill_hip\", [&]() {\n gpu_kernel(iter, FillFunctor(value.to()));\n });\n}\n\nREGISTER_DISPATCH(fill_stub, &fill_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nnamespace {\n\ntemplate \nstruct CUDAKernelLauncher {\n static void launch(TensorIteratorBase& iter, const func_t& f) {\n gpu_kernel(iter, f);\n }\n};\n\nTensor flatten_indices_cuda_kernel(const Tensor& indices, IntArrayRef size) {\n return _flatten_indices(indices, size);\n}\n\n}\n\nREGISTER_CUDA_DISPATCH(flatten_indices_stub, &flatten_indices_cuda_kernel);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nnamespace {\n\ntemplate \nstruct HIPKernelLauncher {\n static void launch(TensorIteratorBase& iter, const func_t& f) {\n gpu_kernel(iter, f);\n }\n};\n\nTensor flatten_indices_hip_kernel(const Tensor& indices, IntArrayRef size) {\n return _flatten_indices(indices, size);\n}\n\n}\n\nREGISTER_HIP_DISPATCH(flatten_indices_stub, &flatten_indices_hip_kernel);\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n\n#include \n\nnamespace at::native {\n\n// std:: does not have clamp functors\ntemplate \nstruct minimum {\n __device__ T operator()(const T& a, const T& b) const {\n return (_isnan(a) || a < b) ? a : b;\n }\n};\n\ntemplate \nstruct maximum {\n __device__ T operator()(const T& a, const T& b) const {\n return (_isnan(a) || a > b) ? a : b;\n }\n};\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n\n#include \n\nnamespace at::native {\n\n// std:: does not have clamp functors\ntemplate \nstruct minimum {\n __device__ T operator()(const T& a, const T& b) const {\n return (_isnan(a) || a < b) ? a : b;\n }\n};\n\ntemplate \nstruct maximum {\n __device__ T operator()(const T& a, const T& b) const {\n return (_isnan(a) || a > b) ? a : b;\n }\n};\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n\nnamespace at::native {\n\n// note(crcrpar): To observe the CI rules, i.e. 20 minutes per file to compile, defensively split instantiations into _impl files.\n// this is only for CUDA 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.\n// As a data point, it took about 20 seconds for CUDA 11.7 installed in my environment.\n// See https://github.com/pytorch/pytorch/pull/81705 for details.\nvoid _fused_adam_kernel_cuda_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool amsgrad,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n if (amsgrad) {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),\n \"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adam_amsgrad_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n } else {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),\n \"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adam_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n }\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n\nnamespace at::native {\n\n// note(crcrpar): To observe the CI rules, i.e. 
20 minutes per file to compile, defensively split instantiations into _impl files.\n// this is only for HIP 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.\n// As a data point, it took about 20 seconds for HIP 11.7 installed in my environment.\n// See https://github.com/pytorch/pytorch/pull/81705 for details.\nvoid _fused_adam_kernel_hip_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool amsgrad,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n if (amsgrad) {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),\n \"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adam_amsgrad_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n } else {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),\n \"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adam_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n }\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n\nnamespace at { namespace native {\n\n// note(crcrpar): To observe the CI rules, i.e. 20 minutes per file to compile, defensively split instantiations into _impl files.\n// this is only for CUDA 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.\n// As a data point, it took about 20 seconds for CUDA 11.7 installed in my environment.\n// See https://github.com/pytorch/pytorch/pull/81705 for details.\nvoid _fused_adamw_kernel_cuda_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool amsgrad,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n if (amsgrad) {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),\n \"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adamw_amsgrad_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n } else {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),\n \"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adamw_cuda_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n }\n}\n\n}} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n#include \n#include \n#include \n\n\nnamespace at { namespace native {\n\n// note(crcrpar): To observe the CI rules, i.e. 20 minutes per file to compile, defensively split instantiations into _impl files.\n// this is only for HIP 11.3 for which it took about 20 minutes and 28 minutes in my workstation and CI, respectively.\n// As a data point, it took about 20 seconds for HIP 11.7 installed in my environment.\n// See https://github.com/pytorch/pytorch/pull/81705 for details.\nvoid _fused_adamw_kernel_hip_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool amsgrad,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n if (amsgrad) {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs}),\n \"params, grads, exp_avgs, exp_avg_sqs, and max_exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adamw_amsgrad_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n } else {\n TORCH_CHECK(\n at::native::check_fast_path_restrictions({params, grads, exp_avgs, exp_avg_sqs}),\n \"params, grads, exp_avgs, and exp_avg_sqs must have same dtype, device, and layout\");\n _fused_adamw_hip_impl_(params, grads, exp_avgs, exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, maximize, grad_scale, found_inf);\n }\n}\n\n}} // namespace at::native\n###" }, { "cuda": "\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_amsgrad_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adamw_kernel_cuda\", [&]() {\n multi_tensor_apply_for_fused_optimizer<5>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */true,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ADAMW);\n });\n}\n\n} } // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_amsgrad_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adamw_kernel_hip\", [&]() {\n multi_tensor_apply_for_fused_optimizer<5>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */true,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ADAMW);\n });\n}\n\n} } // namespace at::native\n###" }, { "cuda": "\n#pragma once\n\n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at {\ntemplate <>\ninline __half* Tensor::data() const {\n return reinterpret_cast<__half*>(data());\n}\n} // namespace at\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n\n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at {\ntemplate <>\ninline __half* Tensor::data() const {\n return reinterpret_cast<__half*>(data());\n}\n} // namespace at\n###" }, { "cuda": "\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_amsgrad_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_amsgrad_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n###" }, { "cuda": "\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? 
grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adamw_kernel_cuda\", [&]() {\n multi_tensor_apply_for_fused_optimizer<4>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */false,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ADAMW);\n });\n}\n\n} } // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adamw_kernel_hip\", [&]() {\n multi_tensor_apply_for_fused_optimizer<4>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */false,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ADAMW);\n });\n}\n\n} } // namespace at::native\n###" }, { "cuda": "\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adamw_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n###" }, { "cuda": "\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid _fused_adam_amsgrad_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? 
grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adam_kernel_cuda\", [&]() {\n multi_tensor_apply_for_fused_optimizer<5>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */true,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ORIGINAL);\n });\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid _fused_adam_amsgrad_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec(), max_exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adam_kernel_hip\", [&]() {\n multi_tensor_apply_for_fused_optimizer<5>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */true,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ORIGINAL);\n });\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adam_amsgrad_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adam_amsgrad_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList max_exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n###" }, { "cuda": "\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid _fused_adam_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? 
grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adam_kernel_cuda\", [&]() {\n multi_tensor_apply_for_fused_optimizer<4>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */false,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ORIGINAL);\n });\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n\n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid _fused_adam_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n) {\n std::vector> tensor_lists{\n params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };\n\n float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr() : nullptr;\n float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr() : nullptr;\n\n AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),\n \"fused_adam_kernel_hip\", [&]() {\n multi_tensor_apply_for_fused_optimizer<4>(\n tensor_lists,\n state_steps,\n FusedAdamMathFunctor(),\n lr,\n beta1,\n beta2,\n weight_decay,\n eps,\n maximize,\n /* amsgrad */false,\n grad_scale_ptr,\n found_inf_ptr,\n ADAM_MODE::ORIGINAL);\n });\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adam_cuda_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#pragma once\n#include \n\nnamespace at { namespace native {\n\nvoid _fused_adam_hip_impl_(\n at::TensorList params,\n at::TensorList grads,\n at::TensorList exp_avgs,\n at::TensorList exp_avg_sqs,\n at::TensorList state_steps,\n const double lr,\n const double beta1,\n const double beta2,\n const double weight_decay,\n const double eps,\n const bool maximize,\n const c10::optional& grad_scale,\n const c10::optional& found_inf\n);\n\n} } // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\n// See note [Jiterator]\nCONSTEXPR_EXCEPT_WIN_CUDA char gcd_name[] = \"gcd\";\nvoid gcd_kernel_cuda(TensorIteratorBase& iter) {\n #if AT_USE_JITERATOR()\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"gcd_cuda\", [&]() {\n jitted_gpu_kernel(iter, gcd_string);\n });\n #else\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"gcd_cuda\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {\n return calc_gcd(a, b);\n });\n });\n #endif // AT_USE_JITERATOR()\n}\n\n// See note [Jiterator]\nCONSTEXPR_EXCEPT_WIN_CUDA char lcm_name[] = \"lcm\";\nvoid lcm_kernel_cuda(TensorIteratorBase& iter) {\n #if AT_USE_JITERATOR()\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"lcm_cuda\", [&]() {\n jitted_gpu_kernel(iter, lcm_string);\n });\n #else\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"lcm_cuda\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {\n scalar_t g = calc_gcd(a, b);\n return (g == 0) ? 0 : ::abs(a / g * b);\n });\n });\n #endif // AT_USE_JITERATOR()\n}\n\nREGISTER_DISPATCH(gcd_stub, &gcd_kernel_cuda);\nREGISTER_DISPATCH(lcm_stub, &lcm_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\n// See note [Jiterator]\nCONSTEXPR_EXCEPT_WIN_HIP char gcd_name[] = \"gcd\";\nvoid gcd_kernel_hip(TensorIteratorBase& iter) {\n #if AT_USE_JITERATOR()\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"gcd_hip\", [&]() {\n jitted_gpu_kernel(iter, gcd_string);\n });\n #else\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"gcd_hip\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {\n return calc_gcd(a, b);\n });\n });\n #endif // AT_USE_JITERATOR()\n}\n\n// See note [Jiterator]\nCONSTEXPR_EXCEPT_WIN_HIP char lcm_name[] = \"lcm\";\nvoid lcm_kernel_hip(TensorIteratorBase& iter) {\n #if AT_USE_JITERATOR()\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"lcm_hip\", [&]() {\n jitted_gpu_kernel(iter, lcm_string);\n });\n #else\n AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), \"lcm_hip\", [&]() {\n gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t {\n scalar_t g = calc_gcd(a, b);\n return (g == 0) ? 
0 : ::abs(a / g * b);\n });\n });\n #endif // AT_USE_JITERATOR()\n}\n\nREGISTER_DISPATCH(gcd_stub, &gcd_kernel_hip);\nREGISTER_DISPATCH(lcm_stub, &lcm_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char hermite_polynomial_h_name[] = \"hermite_polynomial_h_forward\";\n\n void hermite_polynomial_h_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_h_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, hermite_polynomial_h_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_h_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return hermite_polynomial_h_forward(x, n);\n });\n });\n#endif\n } // hermite_polynomial_h_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(hermite_polynomial_h_stub, &hermite_polynomial_h_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char hermite_polynomial_h_name[] = \"hermite_polynomial_h_forward\";\n\n void hermite_polynomial_h_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_h_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, hermite_polynomial_h_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_h_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return hermite_polynomial_h_forward(x, n);\n });\n });\n#endif\n } // hermite_polynomial_h_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(hermite_polynomial_h_stub, &hermite_polynomial_h_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char hermite_polynomial_he_name[] = \"hermite_polynomial_he_forward\";\n\n void hermite_polynomial_he_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_he_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, hermite_polynomial_he_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_he_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return hermite_polynomial_he_forward(x, n);\n });\n });\n#endif\n } // hermite_polynomial_he_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(hermite_polynomial_he_stub, &hermite_polynomial_he_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char hermite_polynomial_he_name[] = \"hermite_polynomial_he_forward\";\n\n void hermite_polynomial_he_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_he_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, hermite_polynomial_he_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"hermite_polynomial_he_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return hermite_polynomial_he_forward(x, n);\n });\n });\n#endif\n } // hermite_polynomial_he_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(hermite_polynomial_he_stub, &hermite_polynomial_he_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n\n#include \n#include \n#include \n#include \n\n// Collection of in-kernel scan / prefix sum utilities\n\nnamespace at {\nnamespace cuda {\n\n// Inclusive prefix sum for binary vars using intra-warp voting +\n// shared memory\ntemplate \n__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {\n // Within-warp, we use warp voting.\n#if defined (USE_ROCM)\n unsigned long long int vote = WARP_BALLOT(in);\n T index = __popcll(getLaneMaskLe() & vote);\n T carry = __popcll(vote);\n#else\n T vote = WARP_BALLOT(in);\n T index = __popc(getLaneMaskLe() & vote);\n T carry = __popc(vote);\n#endif\n\n int warp = threadIdx.x / C10_WARP_SIZE;\n\n // Per each warp, write out a value\n if (getLaneId() == 0) {\n smem[warp] = carry;\n }\n\n __syncthreads();\n\n // Sum across warps in one thread. This appears to be faster than a\n // warp shuffle scan for CC 3.0+\n if (threadIdx.x == 0) {\n int current = 0;\n for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {\n T v = smem[i];\n smem[i] = binop(smem[i], current);\n current = binop(current, v);\n }\n }\n\n __syncthreads();\n\n // load the carry from the preceding warp\n if (warp >= 1) {\n index = binop(index, smem[warp - 1]);\n }\n\n *out = index;\n\n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n// Exclusive prefix sum for binary vars using intra-warp voting +\n// shared memory\ntemplate \n__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {\n inclusiveBinaryPrefixScan(smem, in, out, binop);\n\n // Inclusive to exclusive\n *out -= (T) in;\n\n // The outgoing carry for all threads is the last warp's sum\n *carry = smem[at::ceil_div(blockDim.x, C10_WARP_SIZE) - 1];\n\n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n}} // namespace at::cuda\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#pragma once\n\n#include \n#include \n#include \n#include \n\n// Collection of in-kernel scan / prefix sum utilities\n\nnamespace at {\nnamespace hip {\n\n// Inclusive prefix sum for binary vars using intra-warp voting +\n// shared memory\ntemplate \n__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {\n // Within-warp, we use warp voting.\n#if defined (USE_ROCM)\n unsigned long long int vote = WARP_BALLOT(in);\n T index = __popcll(getLaneMaskLe() & vote);\n T carry = __popcll(vote);\n#else\n T vote = WARP_BALLOT(in);\n T index = __popc(getLaneMaskLe() & vote);\n T carry = __popc(vote);\n#endif\n\n int warp = threadIdx.x / C10_WARP_SIZE;\n\n // Per each warp, write out a value\n if (getLaneId() == 0) {\n smem[warp] = carry;\n }\n\n __syncthreads();\n\n // Sum across warps in one thread. This appears to be faster than a\n // warp shuffle scan for CC 3.0+\n if (threadIdx.x == 0) {\n int current = 0;\n for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {\n T v = smem[i];\n smem[i] = binop(smem[i], current);\n current = binop(current, v);\n }\n }\n\n __syncthreads();\n\n // load the carry from the preceding warp\n if (warp >= 1) {\n index = binop(index, smem[warp - 1]);\n }\n\n *out = index;\n\n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n// Exclusive prefix sum for binary vars using intra-warp voting +\n// shared memory\ntemplate \n__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {\n inclusiveBinaryPrefixScan(smem, in, out, binop);\n\n // Inclusive to exclusive\n *out -= (T) in;\n\n // The outgoing carry for all threads is the last warp's sum\n *carry = smem[at::ceil_div(blockDim.x, C10_WARP_SIZE) - 1];\n\n if (KillWARDependency) {\n __syncthreads();\n }\n}\n\n}} // namespace at::cuda\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char laguerre_polynomial_l_name[] = \"laguerre_polynomial_l_forward\";\n\n void laguerre_polynomial_l_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"laguerre_polynomial_l_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, laguerre_polynomial_l_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"laguerre_polynomial_l_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return laguerre_polynomial_l_forward(x, n);\n });\n });\n#endif\n } // laguerre_polynomial_l_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(laguerre_polynomial_l_stub, &laguerre_polynomial_l_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char laguerre_polynomial_l_name[] = \"laguerre_polynomial_l_forward\";\n\n void laguerre_polynomial_l_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"laguerre_polynomial_l_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, laguerre_polynomial_l_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"laguerre_polynomial_l_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return laguerre_polynomial_l_forward(x, n);\n });\n });\n#endif\n } // laguerre_polynomial_l_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(laguerre_polynomial_l_stub, &laguerre_polynomial_l_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n const char legendre_polynomial_p_name[] = \"legendre_polynomial_p_forward\";\n\n void legendre_polynomial_p_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"legendre_polynomial_p_cuda\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, legendre_polynomial_p_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"legendre_polynomial_p_cuda\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return legendre_polynomial_p_forward(x, n);\n });\n });\n#endif\n } // legendre_polynomial_p_kernel_cuda\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(legendre_polynomial_p_stub, &legendre_polynomial_p_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n const char legendre_polynomial_p_name[] = \"legendre_polynomial_p_forward\";\n\n void legendre_polynomial_p_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"legendre_polynomial_p_hip\", [&]() {\n opmath_jitted_gpu_kernel_with_scalars(iterator, legendre_polynomial_p_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"legendre_polynomial_p_hip\", [&]() {\n gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {\n return legendre_polynomial_p_forward(x, n);\n });\n });\n#endif\n } // legendre_polynomial_p_kernel_hip\n } // namespace (anonymous)\n\n REGISTER_DISPATCH(legendre_polynomial_p_stub, &legendre_polynomial_p_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n// NOTE: CUDA on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid logaddexp_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::BFloat16, ScalarType::Half,\n iter.dtype(), \"logaddexp_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {\n const auto a = static_cast(a_);\n const auto b = static_cast(b_);\n if (::isinf(a) && a == b) {\n return a;\n } else {\n const auto m = ::max(a, b);\n return m + ::log1p(::exp(-::abs(a - b)));\n }\n });\n });\n}\n\nvoid logaddexp2_kernel_cuda(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND(\n ScalarType::BFloat16,\n iter.dtype(), \"logaddexp2_cuda\",\n [&]() {\n using opmath_t = at::opmath_type;\n const auto inv_log_2 = static_cast(1.0 / c10::ln_2);\n gpu_kernel(iter, [inv_log_2] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {\n const auto a = static_cast(a_);\n const auto b = static_cast(b_);\n if (::isinf(a) && a == b) {\n return a;\n } else {\n const auto m = ::max(a, b);\n return m + ::log1p(::exp2(-::abs(a - b))) * inv_log_2;\n }\n });\n });\n}\n\nREGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_cuda);\nREGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n// NOTE: HIP on Windows requires that the enclosing function\n// of a __device__ lambda not have internal linkage.\n\nnamespace at::native {\n\nvoid logaddexp_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND2(\n ScalarType::BFloat16, ScalarType::Half,\n iter.dtype(), \"logaddexp_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {\n const auto a = static_cast(a_);\n const auto b = static_cast(b_);\n if (::isinf(a) && a == b) {\n return a;\n } else {\n const auto m = ::max(a, b);\n return m + ::log1p(::exp(-::abs(a - b)));\n }\n });\n });\n}\n\nvoid logaddexp2_kernel_hip(TensorIteratorBase& iter) {\n AT_DISPATCH_FLOATING_TYPES_AND(\n ScalarType::BFloat16,\n iter.dtype(), \"logaddexp2_hip\",\n [&]() {\n using opmath_t = at::opmath_type;\n const auto inv_log_2 = static_cast(1.0 / c10::ln_2);\n gpu_kernel(iter, [inv_log_2] GPU_LAMBDA (scalar_t a_, scalar_t b_) -> scalar_t {\n const auto a = static_cast(a_);\n const auto b = static_cast(b_);\n if (::isinf(a) && a == b) {\n return a;\n } else {\n const auto m = ::max(a, b);\n return m + ::log1p(::exp2(-::abs(a - b))) * inv_log_2;\n }\n });\n });\n}\n\nREGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_hip);\nREGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_i0_name[] = \"modified_bessel_i0_forward\";\n\n void modified_bessel_i0_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i0_cuda\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_i0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i0_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_i0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_i0_name[] = \"modified_bessel_i0_forward\";\n\n void modified_bessel_i0_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i0_hip\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_i0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i0_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_i0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_i0_stub, &modified_bessel_i0_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_i1_name[] = \"modified_bessel_i1_forward\";\n\n void modified_bessel_i1_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i1_cuda\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_i1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i1_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_i1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_i1_stub, &modified_bessel_i1_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_i1_name[] = \"modified_bessel_i1_forward\";\n\n void modified_bessel_i1_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i1_hip\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_i1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_i1_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_i1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_i1_stub, &modified_bessel_i1_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k0_name[] = \"modified_bessel_k0_forward\";\n\n void modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k0_cuda\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_k0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k0_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_k0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_k0_stub, &modified_bessel_k0_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_k0_name[] = \"modified_bessel_k0_forward\";\n\n void modified_bessel_k0_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k0_hip\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_k0_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k0_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_k0_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_k0_stub, &modified_bessel_k0_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_CUDA char modified_bessel_k1_name[] = \"modified_bessel_k1_forward\";\n\n void modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k1_cuda\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_k1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k1_cuda\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_k1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_k1_stub, &modified_bessel_k1_kernel_cuda);\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n\n#include \n\n#include \n\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n namespace {\n CONSTEXPR_EXCEPT_WIN_HIP char modified_bessel_k1_name[] = \"modified_bessel_k1_forward\";\n\n void modified_bessel_k1_kernel_hip(TensorIteratorBase& iterator) {\n#if AT_USE_JITERATOR()\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k1_hip\", [&]() {\n jitted_gpu_kernel(iterator, modified_bessel_k1_string);\n });\n#else\n AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), \"modified_bessel_k1_hip\", [&]() {\n gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {\n return modified_bessel_k1_forward(a);\n });\n });\n#endif // AT_USE_JITERATOR()\n }\n }\n\n REGISTER_DISPATCH(special_modified_bessel_k1_stub, &modified_bessel_k1_kernel_hip);\n} // namespace at::native\n###" }, { "cuda": "\n#pragma once\n#include \n#include \n\nnamespace at { namespace native {\n\nnamespace {\n\n\n// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.\n// So we need to define the functions with the explicit function signatures.\n// As for pow, the following signatures are defined as the device function:\n// pow(float, int)\n// pow(double, int)\n// pow(float, float)\n// pow(double, double)\n#ifdef _MSC_VER\n// Functions for pow\n// pow for at::Half\nstatic inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {\n return static_cast(std::pow(static_cast(base), static_cast(exp)));\n}\n// pow for at::BFloat16\nstatic inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) {\n return static_cast(std::pow(static_cast(base), static_cast(exp)));\n}\n// pow (floating, floating/int)\ntemplate \nstatic inline __host__ __device__ typename std::enable_if::value && (std::is_same::value || std::is_same::value), Base_type>::type\n pow_(Base_type base, Exp_type exp) {\n return std::pow(base, exp);\n}\n// pow (Otherwise)\ntemplate \nstatic inline __host__ __device__ typename std::enable_if::value && !std::is_same::value, Base_type>::type\n pow_(Base_type base, Exp_type exp) {\n return static_cast(std::pow(static_cast(base), static_cast(exp)));\n}\n#else\ntemplate \nstatic inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {\n return ::pow(base, exp);\n}\n#endif\n\ntemplate \nstatic inline __host__ __device__ std::enable_if_t::value, T> pow_(\n T base, T exp) {\n return at::native::powi(base, exp);\n}\n\ntemplate \nstatic inline __host__ __device__ c10::complex pow_(c10::complex base, c10::complex exp) {\n return c10_complex_math::pow(base, exp);\n}\n\n} // namespace\n}} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#pragma once\n#include \n#include \n\nnamespace at { namespace native {\n\nnamespace {\n\n\n// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.\n// So we need to define the functions with the explicit function signatures.\n// As for pow, the following signatures are defined as the device function:\n// pow(float, int)\n// pow(double, int)\n// pow(float, float)\n// pow(double, double)\n#ifdef _MSC_VER\n// Functions for pow\n// pow for at::Half\nstatic inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {\n return static_cast(::pow(static_cast(base), static_cast(exp)));\n}\n// pow for at::BFloat16\nstatic inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) {\n return static_cast(::pow(static_cast(base), static_cast(exp)));\n}\n// pow (floating, floating/int)\ntemplate \nstatic inline __host__ __device__ typename std::enable_if::value && (std::is_same::value || std::is_same::value), Base_type>::type\n pow_(Base_type base, Exp_type exp) {\n return ::pow(base, exp);\n}\n// pow (Otherwise)\ntemplate \nstatic inline __host__ __device__ typename std::enable_if::value && !std::is_same::value, Base_type>::type\n pow_(Base_type base, Exp_type exp) {\n return static_cast(::pow(static_cast(base), static_cast(exp)));\n}\n#else\ntemplate \nstatic inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {\n return ::pow(base, exp);\n}\n#endif\n\ntemplate \nstatic inline __host__ __device__ std::enable_if_t::value, T> pow_(\n T base, T exp) {\n return at::native::powi(base, exp);\n}\n\ntemplate \nstatic inline __host__ __device__ c10::complex pow_(c10::complex base, c10::complex exp) {\n return c10_complex_math::pow(base, exp);\n}\n\n} // namespace\n}} // namespace at::native\n###" }, { "cuda": "\n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace {\n\n// See note [Algorithm of randperm]\ntemplate\n__global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T mask, int n, at::PhiloxCudaState philox_args) {\n int tid = threadIdx.x + blockDim.x * blockIdx.x;\n\n // find the beginning of islands\n if (tid >= n - 1) return; // out of range\n if ((keys[tid] & mask) != (keys[tid + 1] & mask)) return; // not in an island\n if (tid != 0 && (keys[tid] & mask) == (keys[tid - 1] & mask)) return; // not the beginning of an island\n\n // find the size of islands\n int island_size = 0;\n do { island_size++; }\n while ((tid + island_size < n) && (keys[tid + island_size] & mask) == (keys[tid] & mask));\n\n // do random permutation inside each island.\n data += tid;\n auto seeds = at::cuda::philox::unpack(philox_args);\n curandStatePhilox4_32_10_t state;\n curand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state);\n for (int i = island_size - 1; i > 0; i--) {\n unsigned int r = curand(&state) % (i + 1);\n if (i != r) {\n scalar_t tmp = data[i];\n data[i] = data[r];\n data[r] = tmp;\n }\n }\n}\n\n// See note [Algorithm of randperm]\ntemplate\nvoid randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional &gen_) {\n auto gen = at::get_generator_or_default(gen_, at::cuda::detail::getDefaultCUDAGenerator());\n int64_t counter_offset = n;\n at::PhiloxCudaState rng_engine_inputs;\n {\n // See Note [Acquire lock when using random generators]\n std::lock_guard lock(gen->mutex_);\n rng_engine_inputs = gen->philox_cuda_state(counter_offset);\n }\n T mask = static_cast((1UL << bits) - 1);\n 
randperm_handle_duplicate_keys_kernel<<<(n + 511) / 512, 512, 0, at::cuda::getCurrentCUDAStream()>>>(\n keys, data, mask, n, rng_engine_inputs);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\n}\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace {\n\n// See note [Algorithm of randperm]\ntemplate\n__global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T mask, int n, at::PhiloxHipState philox_args) {\n int tid = threadIdx.x + blockDim.x * blockIdx.x;\n\n // find the beginning of islands\n if (tid >= n - 1) return; // out of range\n if ((keys[tid] & mask) != (keys[tid + 1] & mask)) return; // not in an island\n if (tid != 0 && (keys[tid] & mask) == (keys[tid - 1] & mask)) return; // not the beginning of an island\n\n // find the size of islands\n int island_size = 0;\n do { island_size++; }\n while ((tid + island_size < n) && (keys[tid + island_size] & mask) == (keys[tid] & mask));\n\n // do random permutation inside each island.\n data += tid;\n auto seeds = at::cuda::philox::unpack(philox_args);\n hiprandStatePhilox4_32_10_t state;\n hiprand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state);\n for (int i = island_size - 1; i > 0; i--) {\n unsigned int r = hiprand(&state) % (i + 1);\n if (i != r) {\n scalar_t tmp = data[i];\n data[i] = data[r];\n data[r] = tmp;\n }\n }\n}\n\n// See note [Algorithm of randperm]\ntemplate\nvoid randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional &gen_) {\n auto gen = at::get_generator_or_default(gen_, at::cuda::detail::getDefaultHIPGenerator());\n int64_t counter_offset = n;\n at::PhiloxHipState rng_engine_inputs;\n {\n // See Note [Acquire lock when using random generators]\n std::lock_guard lock(gen->mutex_);\n rng_engine_inputs = gen->philox_hip_state(counter_offset);\n }\n T mask = static_cast((1UL << bits) - 1);\n hipLaunchKernelGGL(( randperm_handle_duplicate_keys_kernel), dim3((n + 511) / 512), dim3(512), 0, at::hip::getCurrentHIPStream(), \n keys, data, mask, n, rng_engine_inputs);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\n}\n###" }, { "cuda": "\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\nnamespace at::native {\nvoid record_stream_cuda(Tensor& self, c10::Stream stream) {\n struct c10::StreamData3 data = stream.pack3();\n c10::cuda::CUDACachingAllocator::recordStream(self.storage().data_ptr(), at::cuda::CUDAStream::unpack3(data.stream_id, data.device_index, data.device_type));\n}\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_ONLY_METHOD_OPERATORS\n#include \n#include \n\n#ifndef AT_PER_OPERATOR_HEADERS\n#include \n#else\n#include \n#endif\n\nnamespace at::native {\nvoid record_stream_hip(Tensor& self, c10::Stream stream) {\n struct c10::StreamData3 data = stream.pack3();\n c10::hip::HIPCachingAllocator::recordStream(self.storage().data_ptr(), at::hip::HIPStream::unpack3(data.stream_id, data.device_index, data.device_type));\n}\n} // namespace at::native\n###" }, { "cuda": "\n#include \n\n#include \n#include \n\nnamespace at {\nnamespace cuda {\nnamespace {\n__global__ void spin_kernel(int64_t cycles) {\n // Few AMD specific GPUs have different clock intrinsic\n#if defined(__GFX11__) && defined(USE_ROCM) && !defined(__CUDA_ARCH__)\n int64_t start_clock = wall_clock64();\n#else\n // see concurrentKernels CUDA sampl\n int64_t start_clock = clock64();\n#endif\n int64_t clock_offset = 0;\n while (clock_offset < cycles)\n {\n#if defined(__GFX11__) && defined(USE_ROCM) && !defined(__CUDA_ARCH__)\n clock_offset = wall_clock64() - start_clock;\n#else\n clock_offset = clock64() - start_clock;\n#endif\n }\n}\n}\n\nvoid sleep(int64_t cycles) {\n dim3 grid(1);\n dim3 block(1);\n spin_kernel<<>>(cycles);\n C10_CUDA_KERNEL_LAUNCH_CHECK();\n}\n\n}} // namespace at::cuda\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \"hip/hip_runtime.h\"\n#include \n\n#include \n#include \n\nnamespace at {\nnamespace hip {\nnamespace {\n__global__ void spin_kernel(int64_t cycles) {\n // Few AMD specific GPUs have different clock intrinsic\n#if defined(__GFX11__) && defined(USE_ROCM) && !defined(__HIP_ARCH__)\n int64_t start_clock = wall_clock64();\n#else\n // see concurrentKernels HIP sampl\n int64_t start_clock = clock64();\n#endif\n int64_t clock_offset = 0;\n while (clock_offset < cycles)\n {\n#if defined(__GFX11__) && defined(USE_ROCM) && !defined(__HIP_ARCH__)\n clock_offset = wall_clock64() - start_clock;\n#else\n clock_offset = clock64() - start_clock;\n#endif\n }\n}\n}\n\nvoid sleep(int64_t cycles) {\n dim3 grid(1);\n dim3 block(1);\n hipLaunchKernelGGL(( spin_kernel), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStream(), cycles);\n C10_HIP_KERNEL_LAUNCH_CHECK();\n}\n\n}} // namespace at::cuda\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n\n\nnamespace at::native {\n\nstatic inline std::ostream& operator<<(std::ostream& out, dim3 dim) {\n if (dim.y == 1 && dim.z == 1) {\n out << dim.x;\n } else {\n out << \"[\" << dim.x << \",\" << dim.y << \",\" << dim.z << \"]\";\n }\n return out;\n}\n\nstd::ostream& operator<<(std::ostream& out, const ReduceConfig& config) {\n out << \"ReduceConfig(\";\n out << \"element_size_bytes=\" << config.element_size_bytes << \", \";\n out << \"num_inputs=\" << config.num_inputs << \", \";\n out << \"num_outputs=\" << config.num_outputs << \", \";\n out << \"step_input=\" << config.step_input << \", \";\n out << \"step_output=\" << config.step_output << \", \";\n out << \"ctas_per_output=\" << config.ctas_per_output << \", \";\n out << \"input_mult=[\";\n for (int i = 0; i < 3; i++) {\n if (i != 0) {\n out << \",\";\n }\n out << config.input_mult[i];\n }\n out << \"], \";\n out << \"output_mult=[\";\n for (int i = 0; i < 2; i++) {\n if (i != 0) {\n out << \",\";\n }\n out << config.output_mult[i];\n }\n out << \"], \";\n out << \"vectorize_input=\" << config.vectorize_input << \", \";\n out << \"output_vec_size=\" << config.output_vec_size << 
\", \";\n out << \"block_width=\" << config.block_width << \", \";\n out << \"block_height=\" << config.block_height << \", \";\n out << \"num_threads=\" << config.num_threads << \", \";\n out << \"values_per_thread=\" << config.values_per_thread() << \", \";\n out << \"block=\" << config.block() << \", \";\n out << \"grid=\" << config.grid() << \", \";\n out << \"global_memory_size=\" << config.global_memory_size();\n out << \")\";\n return out;\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n\n#include \n\n\nnamespace at::native {\n\nstatic inline std::ostream& operator<<(std::ostream& out, dim3 dim) {\n if (dim.y == 1 && dim.z == 1) {\n out << dim.x;\n } else {\n out << \"[\" << dim.x << \",\" << dim.y << \",\" << dim.z << \"]\";\n }\n return out;\n}\n\nstd::ostream& operator<<(std::ostream& out, const ReduceConfig& config) {\n out << \"ReduceConfig(\";\n out << \"element_size_bytes=\" << config.element_size_bytes << \", \";\n out << \"num_inputs=\" << config.num_inputs << \", \";\n out << \"num_outputs=\" << config.num_outputs << \", \";\n out << \"step_input=\" << config.step_input << \", \";\n out << \"step_output=\" << config.step_output << \", \";\n out << \"ctas_per_output=\" << config.ctas_per_output << \", \";\n out << \"input_mult=[\";\n for (int i = 0; i < 3; i++) {\n if (i != 0) {\n out << \",\";\n }\n out << config.input_mult[i];\n }\n out << \"], \";\n out << \"output_mult=[\";\n for (int i = 0; i < 2; i++) {\n if (i != 0) {\n out << \",\";\n }\n out << config.output_mult[i];\n }\n out << \"], \";\n out << \"vectorize_input=\" << config.vectorize_input << \", \";\n out << \"output_vec_size=\" << config.output_vec_size << \", \";\n out << \"block_width=\" << config.block_width << \", \";\n out << \"block_height=\" << config.block_height << \", \";\n out << \"num_threads=\" << config.num_threads << \", \";\n out << \"values_per_thread=\" << config.values_per_thread() << \", \";\n out << \"block=\" << config.block() << \", \";\n out << \"grid=\" << config.grid() << \", \";\n out << \"global_memory_size=\" << config.global_memory_size();\n out << \")\";\n return out;\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nvoid _min_max_values_kernel_cuda_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n MinMaxOps{},\n thrust::pair(\n at::numeric_limits::upper_bound(),\n at::numeric_limits::lower_bound()));\n}\n\nvoid aminmax_allreduce_launch_kernel(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.input_dtype(), \"aminmax_all_cuda\", [&] {\n _min_max_values_kernel_cuda_impl(iter);\n });\n}\n\nvoid aminmax_launch_kernel(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.input_dtype(), \"aminmax_cuda\", [&]() {\n gpu_reduce_kernel(\n iter,\n MinMaxOps{},\n thrust::pair(\n at::numeric_limits::upper_bound(),\n at::numeric_limits::lower_bound()));\n });\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nvoid _min_max_values_kernel_hip_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n MinMaxOps{},\n thrust::pair(\n at::numeric_limits::upper_bound(),\n at::numeric_limits::lower_bound()));\n}\n\nvoid aminmax_allreduce_launch_kernel(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.input_dtype(), \"aminmax_all_hip\", [&] {\n _min_max_values_kernel_hip_impl(iter);\n });\n}\n\nvoid aminmax_launch_kernel(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.input_dtype(), \"aminmax_hip\", [&]() {\n gpu_reduce_kernel(\n iter,\n MinMaxOps{},\n thrust::pair(\n at::numeric_limits::upper_bound(),\n at::numeric_limits::lower_bound()));\n });\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nvoid argmax_kernel_cuda_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n ArgMaxOps{},\n thrust::pair(\n at::numeric_limits::lower_bound(), 0));\n};\n\nvoid argmax_kernel_cuda(TensorIterator& iter) {\n // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,\n // we can convert float16 & bfloat16 to float and do all the operations in\n // float.\n if (iter.dtype(1) == kHalf) {\n argmax_kernel_cuda_impl(iter);\n } else if (iter.dtype(1) == kBFloat16) {\n argmax_kernel_cuda_impl(iter);\n } else {\n AT_DISPATCH_ALL_TYPES(iter.dtype(1), \"argmax_cuda\", [&]() {\n argmax_kernel_cuda_impl(iter);\n });\n }\n}\n\nREGISTER_DISPATCH(argmax_stub, &argmax_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nvoid argmax_kernel_hip_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n ArgMaxOps{},\n thrust::pair(\n at::numeric_limits::lower_bound(), 0));\n};\n\nvoid argmax_kernel_hip(TensorIterator& iter) {\n // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,\n // we can convert float16 & bfloat16 to float and do all the operations in\n // float.\n if (iter.dtype(1) == kHalf) {\n argmax_kernel_hip_impl(iter);\n } else if (iter.dtype(1) == kBFloat16) {\n argmax_kernel_hip_impl(iter);\n } else {\n AT_DISPATCH_ALL_TYPES(iter.dtype(1), \"argmax_hip\", [&]() {\n argmax_kernel_hip_impl(iter);\n });\n }\n}\n\nREGISTER_DISPATCH(argmax_stub, &argmax_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nvoid argmin_kernel_cuda_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n ArgMinOps{},\n thrust::pair(\n at::numeric_limits::upper_bound(), 0));\n};\n\nvoid argmin_kernel_cuda(TensorIterator& iter) {\n // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,\n // we can convert float16 & bfloat16 to float and do all the operations in\n // float.\n if (iter.dtype(1) == kHalf) {\n argmin_kernel_cuda_impl(iter);\n } else if (iter.dtype(1) == kBFloat16) {\n argmin_kernel_cuda_impl(iter);\n } else {\n AT_DISPATCH_ALL_TYPES(iter.dtype(1), \"argmin_cuda\", [&]() {\n argmin_kernel_cuda_impl(iter);\n });\n }\n}\n\nREGISTER_DISPATCH(argmin_stub, &argmin_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nvoid argmin_kernel_hip_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n ArgMinOps{},\n thrust::pair(\n at::numeric_limits::upper_bound(), 0));\n};\n\nvoid argmin_kernel_hip(TensorIterator& iter) {\n // For float16 & bfloat16, instead of implementing is_nan and warp_shfl_down,\n // we can convert float16 & bfloat16 to float and do all the operations in\n // float.\n if (iter.dtype(1) == kHalf) {\n argmin_kernel_hip_impl(iter);\n } else if (iter.dtype(1) == kBFloat16) {\n argmin_kernel_hip_impl(iter);\n } else {\n AT_DISPATCH_ALL_TYPES(iter.dtype(1), \"argmin_hip\", [&]() {\n argmin_kernel_hip_impl(iter);\n });\n }\n}\n\nREGISTER_DISPATCH(argmin_stub, &argmin_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid and_kernel_cuda(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(\n kHalf, kBFloat16, kBool, iter.common_dtype(), \"and_cuda\", [&]() {\n gpu_reduce_kernel(\n iter,\n func_wrapper([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {\n return (static_cast(a) && static_cast(b));\n }),\n true);\n });\n}\n\nvoid or_kernel_cuda(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(\n kHalf, kBFloat16, kBool, iter.common_dtype(), \"or_cuda\", [&]() {\n gpu_reduce_kernel(\n iter,\n func_wrapper([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {\n return (static_cast(a) || static_cast(b));\n }),\n false);\n });\n}\n\nREGISTER_DISPATCH(and_stub, &and_kernel_cuda);\nREGISTER_DISPATCH(or_stub, &or_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\nvoid and_kernel_hip(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(\n kHalf, kBFloat16, kBool, iter.common_dtype(), \"and_hip\", [&]() {\n gpu_reduce_kernel(\n iter,\n func_wrapper([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {\n return (static_cast(a) && static_cast(b));\n }),\n true);\n });\n}\n\nvoid or_kernel_hip(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(\n kHalf, kBFloat16, kBool, iter.common_dtype(), \"or_hip\", [&]() {\n gpu_reduce_kernel(\n iter,\n func_wrapper([] GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {\n return (static_cast(a) || static_cast(b));\n }),\n false);\n });\n}\n\nREGISTER_DISPATCH(and_stub, &and_kernel_hip);\nREGISTER_DISPATCH(or_stub, &or_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nstruct MaxNanFunctor {\n __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {\n return (at::_isnan(a) || a > b) ? 
a : b;\n }\n};\n\ntemplate \nvoid max_values_kernel_cuda_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n func_wrapper(MaxNanFunctor()),\n at::numeric_limits::lower_bound());\n}\n\nvoid max_values_kernel_cuda(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.dtype(), \"max_values_cuda\", [&]() {\n max_values_kernel_cuda_impl(iter);\n });\n}\n\nvoid max_launch_kernel(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.input_dtype(), \"max_cuda\", [&]() {\n gpu_reduce_kernel(\n iter,\n MaxOps{},\n thrust::pair(\n at::numeric_limits::lower_bound(), 0));\n });\n}\n\nvoid max_all_launch_kernel(TensorIterator &iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), \"max_all_cuda\", [&] {\n max_values_kernel_cuda_impl(iter);\n });\n}\n\nREGISTER_DISPATCH(max_values_stub, &max_values_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\nnamespace at::native {\n\ntemplate \nstruct MaxNanFunctor {\n __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {\n return (at::_isnan(a) || a > b) ? a : b;\n }\n};\n\ntemplate \nvoid max_values_kernel_hip_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter,\n func_wrapper(MaxNanFunctor()),\n at::numeric_limits::lower_bound());\n}\n\nvoid max_values_kernel_hip(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.dtype(), \"max_values_hip\", [&]() {\n max_values_kernel_hip_impl(iter);\n });\n}\n\nvoid max_launch_kernel(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(\n kBFloat16, kHalf, kBool, iter.input_dtype(), \"max_hip\", [&]() {\n gpu_reduce_kernel(\n iter,\n MaxOps{},\n thrust::pair(\n at::numeric_limits::lower_bound(), 0));\n });\n}\n\nvoid max_all_launch_kernel(TensorIterator &iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), \"max_all_hip\", [&] {\n max_values_kernel_hip_impl(iter);\n });\n}\n\nREGISTER_DISPATCH(max_values_stub, &max_values_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n\nnamespace at::native {\n\ntemplate \nstruct MinNanFunctor {\n __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {\n return (at::_isnan(a) || a < b) ? 
a : b;\n }\n};\n\ntemplate \nvoid min_values_kernel_cuda_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter, func_wrapper (MinNanFunctor()),\n at::numeric_limits::upper_bound());\n}\n\nvoid min_values_kernel_cuda(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), \"min_values_cuda\", [&]() {\n min_values_kernel_cuda_impl(iter);\n });\n}\n\nvoid min_launch_kernel(TensorIterator &iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), \"min_cuda\", [&]() {\n gpu_reduce_kernel(\n iter,\n MinOps{},\n thrust::pair(at::numeric_limits::upper_bound(), 0));\n });\n}\n\nvoid min_all_launch_kernel(TensorIterator &iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), \"min_all_cuda\", [&] {\n min_values_kernel_cuda_impl(iter);\n });\n}\n\nREGISTER_DISPATCH(min_values_stub, &min_values_kernel_cuda);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\n#include \n#include \n#include \n\n\nnamespace at::native {\n\ntemplate \nstruct MinNanFunctor {\n __device__ __forceinline__ acc_t operator()(acc_t a, acc_t b) const {\n return (at::_isnan(a) || a < b) ? a : b;\n }\n};\n\ntemplate \nvoid min_values_kernel_hip_impl(TensorIterator& iter) {\n gpu_reduce_kernel(\n iter, func_wrapper (MinNanFunctor()),\n at::numeric_limits::upper_bound());\n}\n\nvoid min_values_kernel_hip(TensorIterator& iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.dtype(), \"min_values_hip\", [&]() {\n min_values_kernel_hip_impl(iter);\n });\n}\n\nvoid min_launch_kernel(TensorIterator &iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), \"min_hip\", [&]() {\n gpu_reduce_kernel(\n iter,\n MinOps{},\n thrust::pair(at::numeric_limits::upper_bound(), 0));\n });\n}\n\nvoid min_all_launch_kernel(TensorIterator &iter) {\n AT_DISPATCH_ALL_TYPES_AND3(kBFloat16, kHalf, kBool, iter.input_dtype(), \"min_all_hip\", [&] {\n min_values_kernel_hip_impl(iter);\n });\n}\n\nREGISTER_DISPATCH(min_values_stub, &min_values_kernel_hip);\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nnamespace at::native {\ntemplate \nvoid std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {\n \n \n using accscalar_t = at::acc_type;\n using ops_t = WelfordOps>;\n ops_t ops(static_cast(correction), take_sqrt);\n gpu_reduce_kernel(iter, ops, typename ops_t::acc_t{});\n}\nstatic void std_var_kernel_cuda(TensorIterator& iter, double correction, bool take_sqrt) {\n const auto input_dtype = iter.input_dtype();\n if (input_dtype == kHalf && iter.dtype() == kFloat) {\n \n std_var_kernel_impl(iter, correction, take_sqrt);\n } else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) {\n \n std_var_kernel_impl(iter, correction, take_sqrt);\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), \"std_cuda\", [&]() {\n std_var_kernel_impl(iter, correction, take_sqrt);\n });\n }\n}\ntemplate \nvoid mean_kernel_impl(TensorIterator& iter) {\n \n using factor_t = typename c10::scalar_value_type::type;\n factor_t factor = static_cast(iter.num_output_elements()) / iter.numel();\n gpu_reduce_kernel(iter, MeanOps {factor});\n}\nstatic 
void mean_kernel_cuda(TensorIterator& iter) {\n if (iter.dtype() == kHalf) {\n mean_kernel_impl(iter);\n } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {\n \n mean_kernel_impl(iter);\n } else if(iter.dtype() == kBFloat16) {\n mean_kernel_impl(iter);\n } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {\n \n mean_kernel_impl(iter);\n } else {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), \"mean_cuda\", [&]() {\n mean_kernel_impl(iter);\n });\n }\n}\nREGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda);\nREGISTER_DISPATCH(mean_stub, &mean_kernel_cuda);\n} \n\n###", "hip": " \n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \nnamespace at::native {\ntemplate \nvoid std_var_kernel_impl(TensorIterator& iter, double correction, bool take_sqrt) {\n \n \n using accscalar_t = at::acc_type;\n using ops_t = WelfordOps>;\n ops_t ops(static_cast(correction), take_sqrt);\n gpu_reduce_kernel(iter, ops, typename ops_t::acc_t{});\n}\nstatic void std_var_kernel_hip(TensorIterator& iter, double correction, bool take_sqrt) {\n const auto input_dtype = iter.input_dtype();\n if (input_dtype == kHalf && iter.dtype() == kFloat) {\n \n std_var_kernel_impl(iter, correction, take_sqrt);\n } else if (input_dtype == kBFloat16 && iter.dtype() == kFloat) {\n \n std_var_kernel_impl(iter, correction, take_sqrt);\n } else {\n AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), \"std_hip\", [&]() {\n std_var_kernel_impl(iter, correction, take_sqrt);\n });\n }\n}\ntemplate \nvoid mean_kernel_impl(TensorIterator& iter) {\n \n using factor_t = typename c10::scalar_value_type::type;\n factor_t factor = static_cast(iter.num_output_elements()) / iter.numel();\n gpu_reduce_kernel(iter, MeanOps {factor});\n}\nstatic void mean_kernel_hip(TensorIterator& iter) {\n if (iter.dtype() == kHalf) {\n mean_kernel_impl(iter);\n } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {\n \n mean_kernel_impl(iter);\n } else if(iter.dtype() == kBFloat16) {\n mean_kernel_impl(iter);\n } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {\n \n mean_kernel_impl(iter);\n } else {\n AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), \"mean_hip\", [&]() {\n mean_kernel_impl(iter);\n });\n }\n}\nREGISTER_DISPATCH(std_var_stub, &std_var_kernel_hip);\nREGISTER_DISPATCH(mean_stub, &mean_kernel_hip);\n} ###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n// This reduction accumulates results as the type `acc_t`. 
By default, when\n// `scalar_t` is complex, `acc_t` is the downgraded real number type.\n// Otherwise, `acc_t` and `scalar_t` are the same type.\ntemplate ::type, typename out_t=typename scalar_value_type::type>\nvoid norm_kernel_cuda_impl(TensorIterator& iter, double p) {\n if (p == static_cast(0)) {\n gpu_reduce_kernel(iter, NormZeroOps(), 0);\n } else if (p == static_cast(1)) {\n gpu_reduce_kernel(iter, NormOneOps(), 0);\n } else if (p == static_cast(2)) {\n gpu_reduce_kernel(iter, NormTwoOps(), 0);\n } else if (p == static_cast(INFINITY)) {\n gpu_reduce_kernel(iter, AbsMaxOps(), 0);\n } else if (p == static_cast(-INFINITY)) {\n gpu_reduce_kernel(iter, AbsMinOps(), std::numeric_limits::infinity());\n } else {\n gpu_reduce_kernel(iter, NormOps{acc_t(p)}, 0);\n }\n}\n\nvoid norm_launch_kernel(TensorIterator& iter, double ord) {\n if (iter.dtype(0) == kHalf) {\n return norm_kernel_cuda_impl(iter, ord);\n } else if (iter.input_dtype() == kHalf && iter.dtype(0) == kFloat) {\n // type promotion that does cast and reduction in a single kernel\n return norm_kernel_cuda_impl(iter, ord);\n }\n else if(iter.dtype(0) == kBFloat16) {\n return norm_kernel_cuda_impl(iter, ord);\n } else if (iter.input_dtype() == kBFloat16 && iter.dtype(0) == kFloat) {\n // type promotion that does cast and reduction in a single kernel\n return norm_kernel_cuda_impl(iter, ord);\n }\n AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), \"norm_cuda\", [&] {\n norm_kernel_cuda_impl(iter, ord);\n });\n}\n\n} // namespace at::native\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n\nnamespace at::native {\n\n// This reduction accumulates results as the type `acc_t`. 
By default, when\n// `scalar_t` is complex, `acc_t` is the downgraded real number type.\n// Otherwise, `acc_t` and `scalar_t` are the same type.\ntemplate ::type, typename out_t=typename scalar_value_type::type>\nvoid norm_kernel_hip_impl(TensorIterator& iter, double p) {\n if (p == static_cast(0)) {\n gpu_reduce_kernel(iter, NormZeroOps(), 0);\n } else if (p == static_cast(1)) {\n gpu_reduce_kernel(iter, NormOneOps(), 0);\n } else if (p == static_cast(2)) {\n gpu_reduce_kernel(iter, NormTwoOps(), 0);\n } else if (p == static_cast(INFINITY)) {\n gpu_reduce_kernel(iter, AbsMaxOps(), 0);\n } else if (p == static_cast(-INFINITY)) {\n gpu_reduce_kernel(iter, AbsMinOps(), std::numeric_limits::infinity());\n } else {\n gpu_reduce_kernel(iter, NormOps{acc_t(p)}, 0);\n }\n}\n\nvoid norm_launch_kernel(TensorIterator& iter, double ord) {\n if (iter.dtype(0) == kHalf) {\n return norm_kernel_hip_impl(iter, ord);\n } else if (iter.input_dtype() == kHalf && iter.dtype(0) == kFloat) {\n // type promotion that does cast and reduction in a single kernel\n return norm_kernel_hip_impl(iter, ord);\n }\n else if(iter.dtype(0) == kBFloat16) {\n return norm_kernel_hip_impl(iter, ord);\n } else if (iter.input_dtype() == kBFloat16 && iter.dtype(0) == kFloat) {\n // type promotion that does cast and reduction in a single kernel\n return norm_kernel_hip_impl(iter, ord);\n }\n AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), \"norm_hip\", [&] {\n norm_kernel_hip_impl(iter, ord);\n });\n}\n\n} // namespace at::native\n###" }, { "cuda": "\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\n#include \n\nnamespace at::native {\nnamespace {\n\nvoid renorm_scale_factor_impl(TensorIteratorBase& iter, double maxnorm) {\n AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), \"renorm_scale_factor_cpu\", [&] {\n const auto maxnorm_s = static_cast(maxnorm);\n gpu_kernel(\n iter,\n [maxnorm_s] GPU_LAMBDA (scalar_t norm) -> scalar_t {\n const auto eps = static_cast(1e-7);\n const auto one = static_cast(1.0);\n return (norm > maxnorm_s) ?\n maxnorm_s / (norm + eps) : one;\n });\n });\n}\n\n} // namespace (anonymous)\n\nREGISTER_DISPATCH(renorm_scale_factor_stub, &renorm_scale_factor_impl);\n\n} // namespace at::native\n\n\n###", "hip": " // !!! 
This is a file automatically generated by hipify!!!\n#define TORCH_ASSERT_NO_OPERATORS\n#include \n#include \n#include \n\n#include \n\nnamespace at::native {\nnamespace {\n\nvoid renorm_scale_factor_impl(TensorIteratorBase& iter, double maxnorm) {\n AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), \"renorm_scale_factor_cpu\", [&] {\n const auto maxnorm_s = static_cast(maxnorm);\n gpu_kernel(\n iter,\n [maxnorm_s] GPU_LAMBDA (scalar_t norm) -> scalar_t {\n const auto eps = static_cast(1e-7);\n const auto one = static_cast(1.0);\n return (norm > maxnorm_s) ?\n maxnorm_s / (norm + eps) : one;\n });\n });\n}\n\n} // namespace (anonymous)\n\nREGISTER_DISPATCH(renorm_scale_factor_stub, &renorm_scale_factor_impl);\n\n} // namespace at::native\n###" }, { "cuda": "\n#include \n#include \n\nnamespace at {\nnamespace cuda {\nnamespace detail {\n\nstruct SizeAndStride {\n int64_t size;\n int64_t stride;\n};\n\n/*\n A comparator that will sort SizeAndStride structs by stride,\n in ascending order.\n */\n int compareSizeAndStride(const void* a, const void* b) {\n const SizeAndStride* aS = (const SizeAndStride*) a;\n const SizeAndStride* bS = (const SizeAndStride*) b;\n\n if (aS->stride < bS->stride) return -1;\n if (aS->stride == bS->stride) return 0;\n return 1;\n}\n\n/*\nReturns false if there is no possibility that the tensor\nhas \"overlapping\" indices and true otherwise.\n\"Overlapping\" indices are two+ valid indices that specify\nthe same offset within the tensor.\nThe function does this by checking for a sufficient but not\nnecessary condition of no overlap. In particular, that\nthat there exists an ordering of the tensor's dimensions\nthat is nicely \"nested,\" with each dimension contained\nwithin the next one.\n*/\nbool maybeOverlappingIndices(const TensorBase& t) {\n /* Extract size/stride arrays; only consider size >1 dims. */\n std::vector info(t.dim());\n int dims = t.dim();\n int nonSize1Dims = 0;\n for (int i = 0; i < dims; ++i) {\n int64_t size = t.size(i);\n if (size > 1) {\n info[nonSize1Dims].size = size;\n info[nonSize1Dims].stride = t.stride(i);\n\n if (info[nonSize1Dims].stride < 1) {\n return true;\n }\n\n ++nonSize1Dims;\n }\n }\n\n // Short-circuits if tensor is a single element.\n if (nonSize1Dims == 0) {\n return false;\n }\n\n /* Ascending order (innermost dimension in sorted view is at [0]) */\n qsort(info.data(), nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride);\n\n for (int i = 0; i < (nonSize1Dims - 1); ++i) {\n if (((info[i].size - 1) * info[i].stride) >= info[i + 1].stride) {\n return true;\n }\n }\n\n return false;\n}\n\n} // detail\n} // cuda\n} // at\n\n\n###", "hip": " // !!! This is a file automatically generated by hipify!!!\n#include \n#include \n\nnamespace at {\nnamespace hip {\nnamespace detail {\n\nstruct SizeAndStride {\n int64_t size;\n int64_t stride;\n};\n\n/*\n A comparator that will sort SizeAndStride structs by stride,\n in ascending order.\n */\n int compareSizeAndStride(const void* a, const void* b) {\n const SizeAndStride* aS = (const SizeAndStride*) a;\n const SizeAndStride* bS = (const SizeAndStride*) b;\n\n if (aS->stride < bS->stride) return -1;\n if (aS->stride == bS->stride) return 0;\n return 1;\n}\n\n/*\nReturns false if there is no possibility that the tensor\nhas \"overlapping\" indices and true otherwise.\n\"Overlapping\" indices are two+ valid indices that specify\nthe same offset within the tensor.\nThe function does this by checking for a sufficient but not\nnecessary condition of no overlap. 
In particular, that\nthat there exists an ordering of the tensor's dimensions\nthat is nicely \"nested,\" with each dimension contained\nwithin the next one.\n*/\nbool maybeOverlappingIndices(const TensorBase& t) {\n /* Extract size/stride arrays; only consider size >1 dims. */\n std::vector info(t.dim());\n int dims = t.dim();\n int nonSize1Dims = 0;\n for (int i = 0; i < dims; ++i) {\n int64_t size = t.size(i);\n if (size > 1) {\n info[nonSize1Dims].size = size;\n info[nonSize1Dims].stride = t.stride(i);\n\n if (info[nonSize1Dims].stride < 1) {\n return true;\n }\n\n ++nonSize1Dims;\n }\n }\n\n // Short-circuits if tensor is a single element.\n if (nonSize1Dims == 0) {\n return false;\n }\n\n /* Ascending order (innermost dimension in sorted view is at [0]) */\n qsort(info.data(), nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride);\n\n for (int i = 0; i < (nonSize1Dims - 1); ++i) {\n if (((info[i].size - 1) * info[i].stride) >= info[i + 1].stride) {\n return true;\n }\n }\n\n return false;\n}\n\n} // detail\n} // cuda\n} // at\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct CtxAlpha {\n float alpha;\n};\n\nstruct CtxAlphaBeta {\n float alpha;\n float beta;\n};\n\nstruct CtxAlphaGamma {\n float alpha;\n float gamma;\n};\n\nstruct CtxNull {\n};\n\ntypedef CtxAlpha CtxElu;\ntypedef CtxAlphaBeta CtxHardSigmoid;\ntypedef CtxAlpha CtxLeakyRelu;\ntypedef CtxNull CtxRelu;\ntypedef CtxAlphaGamma CtxSelu;\ntypedef CtxNull CtxSigmoid;\ntypedef CtxNull CtxSoftplus;\ntypedef CtxNull CtxSoftsign;\ntypedef CtxNull CtxTanh;\ntypedef CtxAlpha CtxThresholdedRelu;\n\n#define UNARY_ACTIVATION_OPS() \\\n UNARY_ACTIVATION_OP_NAME(Elu) \\\n UNARY_ACTIVATION_OP_NAME(HardSigmoid) \\\n UNARY_ACTIVATION_OP_NAME(LeakyRelu) \\\n UNARY_ACTIVATION_OP_NAME(Relu) \\\n UNARY_ACTIVATION_OP_NAME(Selu) \\\n UNARY_ACTIVATION_OP_NAME(Sigmoid) \\\n UNARY_ACTIVATION_OP_NAME(Softplus) \\\n UNARY_ACTIVATION_OP_NAME(Softsign) \\\n UNARY_ACTIVATION_OP_NAME(Tanh) \\\n UNARY_ACTIVATION_OP_NAME(ThresholdedRelu)\n\n#define UNARY_ACTIVATION_IMPL_DECLARATION(name) \\\n template \\\n void Impl_##name( \\\n cudaStream_t stream, \\\n const T* input_data, \\\n T* output_data, \\\n const Ctx##name* func_ctx, \\\n size_t count)\n\n#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name);\nUNARY_ACTIVATION_OPS()\n#undef UNARY_ACTIVATION_OP_NAME\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct CtxAlpha {\n float alpha;\n};\n\nstruct CtxAlphaBeta {\n float alpha;\n float beta;\n};\n\nstruct CtxAlphaGamma {\n float alpha;\n float gamma;\n};\n\nstruct CtxNull {\n};\n\ntypedef CtxAlpha CtxElu;\ntypedef CtxAlphaBeta CtxHardSigmoid;\ntypedef CtxAlpha CtxLeakyRelu;\ntypedef CtxNull CtxRelu;\ntypedef CtxAlphaGamma CtxSelu;\ntypedef CtxNull CtxSigmoid;\ntypedef CtxNull CtxSoftplus;\ntypedef CtxNull CtxSoftsign;\ntypedef CtxNull CtxTanh;\ntypedef CtxAlpha CtxThresholdedRelu;\n\n#define UNARY_ACTIVATION_OPS() \\\n UNARY_ACTIVATION_OP_NAME(Elu) \\\n UNARY_ACTIVATION_OP_NAME(HardSigmoid) \\\n UNARY_ACTIVATION_OP_NAME(LeakyRelu) \\\n UNARY_ACTIVATION_OP_NAME(Relu) \\\n UNARY_ACTIVATION_OP_NAME(Selu) \\\n UNARY_ACTIVATION_OP_NAME(Sigmoid) \\\n UNARY_ACTIVATION_OP_NAME(Softplus) \\\n UNARY_ACTIVATION_OP_NAME(Softsign) \\\n UNARY_ACTIVATION_OP_NAME(Tanh) \\\n UNARY_ACTIVATION_OP_NAME(ThresholdedRelu)\n\n#define UNARY_ACTIVATION_IMPL_DECLARATION(name) \\\n template \\\n void Impl_##name( \\\n hipStream_t stream, \\\n const T* input_data, \\\n T* output_data, \\\n const Ctx##name* func_ctx, \\\n size_t count)\n\n#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name);\nUNARY_ACTIVATION_OPS()\n#undef UNARY_ACTIVATION_OP_NAME\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass ScatterElements final : public CudaKernel {\n public:\n ScatterElements(const OpKernelInfo& info) : CudaKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK(),\n \"Missing/Invalid 'axis' attribute value\");\n }\n ~ScatterElements() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n\n int64_t axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass ScatterElements final : public RocmKernel {\n public:\n ScatterElements(const OpKernelInfo& info) : RocmKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK(),\n \"Missing/Invalid 'axis' attribute value\");\n }\n ~ScatterElements() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n\n int64_t axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct GatherScatterElementsArgs;\n\ntemplate \nStatus ScatterElementsImpl(cudaStream_t stream, const T* input_data, const TIndex* indices_data, const T* updates_data,\n T* output_data, const GatherScatterElementsArgs& args);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct GatherScatterElementsArgs;\n\ntemplate \nStatus ScatterElementsImpl(hipStream_t stream, const T* input_data, const TIndex* indices_data, const T* updates_data,\n T* output_data, const GatherScatterElementsArgs& args);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"core/providers/cuda/tensor/scatter_nd.h\"\n#include \"core/providers/cuda/tensor/scatter_nd_impl.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\nnamespace onnxruntime {\nnamespace cuda {\nONNX_OPERATOR_VERSIONED_KERNEL_EX(ScatterND, kOnnxDomain, 11, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .MayInplace(0, 0), ScatterND);\nONNX_OPERATOR_KERNEL_EX(ScatterND, kOnnxDomain, 13, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .MayInplace(0, 0), ScatterND);\nStatus ScatterND::ComputeInternal(OpKernelContext* context) const {\n const auto* input_tensor = context->Input(0);\n const auto* indices_tensor = context->Input(1);\n const auto* updates_tensor = context->Input(2);\n const auto& input_shape = input_tensor->Shape();\n const auto& indices_shape = indices_tensor->Shape();\n const auto& updates_shape = updates_tensor->Shape();\n \n ORT_RETURN_IF_ERROR(onnxruntime::ScatterND::ValidateShapes(input_shape, indices_shape, updates_shape));\n auto* output_tensor = context->Output(0, input_shape);\n const void* input_data = input_tensor->DataRaw();\n void* output_data = output_tensor->MutableDataRaw();\n size_t element_size = input_tensor->DataType()->Size();\n if (input_data != output_data) {\n \n CUDA_RETURN_IF_ERROR(\n cudaMemcpyAsync(output_data, input_data, input_tensor->SizeInBytes(), cudaMemcpyDeviceToDevice, Stream(context)));\n }\n \n if (indices_shape.Size() == 0) {\n return Status::OK();\n }\n auto last_index_dimension = indices_shape[indices_shape.NumDimensions() - 1];\n \n \n \n TensorPitches input_strides(input_shape);\n std::vector element_counts_and_input_dims(last_index_dimension * 2, 0LL);\n for (int64_t i = 0; i < last_index_dimension; ++i) {\n element_counts_and_input_dims[i] = input_strides[i];\n element_counts_and_input_dims[i + last_index_dimension] = input_shape[i];\n }\n CudaAsyncBuffer element_counts_and_input_dims_gpu(this, element_counts_and_input_dims);\n ORT_RETURN_IF_ERROR(element_counts_and_input_dims_gpu.CopyToGpu(context->GetComputeStream()));\n ORT_RETURN_IF_ERROR(ScatterNDImpl(\n Stream(context), output_data, element_size, indices_shape.Size() / static_cast(last_index_dimension), indices_tensor->Data(), last_index_dimension, element_counts_and_input_dims_gpu.GpuPtr(), updates_tensor->DataRaw(), input_shape.SizeFromDimension(last_index_dimension)));\n return Status::OK();\n}\n} \n} \n\n###", "hip": " \n\n#include \"core/providers/rocm/tensor/scatter_nd.h\"\n#include \"core/providers/rocm/tensor/scatter_nd_impl.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\nnamespace onnxruntime {\nnamespace rocm {\nONNX_OPERATOR_VERSIONED_KERNEL_EX(ScatterND, kOnnxDomain, 11, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n 
.MayInplace(0, 0), ScatterND);\nONNX_OPERATOR_KERNEL_EX(ScatterND, kOnnxDomain, 13, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .MayInplace(0, 0), ScatterND);\nStatus ScatterND::ComputeInternal(OpKernelContext* context) const {\n const auto* input_tensor = context->Input(0);\n const auto* indices_tensor = context->Input(1);\n const auto* updates_tensor = context->Input(2);\n const auto& input_shape = input_tensor->Shape();\n const auto& indices_shape = indices_tensor->Shape();\n const auto& updates_shape = updates_tensor->Shape();\n \n ORT_RETURN_IF_ERROR(onnxruntime::ScatterND::ValidateShapes(input_shape, indices_shape, updates_shape));\n auto* output_tensor = context->Output(0, input_shape);\n const void* input_data = input_tensor->DataRaw();\n void* output_data = output_tensor->MutableDataRaw();\n size_t element_size = input_tensor->DataType()->Size();\n if (input_data != output_data) {\n \n HIP_RETURN_IF_ERROR(\n hipMemcpyAsync(output_data, input_data, input_tensor->SizeInBytes(), hipMemcpyDeviceToDevice, Stream(context)));\n }\n \n if (indices_shape.Size() == 0) {\n return Status::OK();\n }\n auto last_index_dimension = indices_shape[indices_shape.NumDimensions() - 1];\n \n \n \n TensorPitches input_strides(input_shape);\n std::vector element_counts_and_input_dims(last_index_dimension * 2, 0LL);\n for (int64_t i = 0; i < last_index_dimension; ++i) {\n element_counts_and_input_dims[i] = input_strides[i];\n element_counts_and_input_dims[i + last_index_dimension] = input_shape[i];\n }\n RocmAsyncBuffer element_counts_and_input_dims_gpu(this, element_counts_and_input_dims);\n ORT_RETURN_IF_ERROR(element_counts_and_input_dims_gpu.CopyToGpu(context->GetComputeStream()));\n ORT_RETURN_IF_ERROR(ScatterNDImpl(\n Stream(context), output_data, element_size, indices_shape.Size() / static_cast(last_index_dimension), indices_tensor->Data(), last_index_dimension, element_counts_and_input_dims_gpu.GpuPtr(), updates_tensor->DataRaw(), input_shape.SizeFromDimension(last_index_dimension)));\n return Status::OK();\n}\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/scatter_nd.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass ScatterND final : public CudaKernel {\n public:\n explicit ScatterND(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/scatter_nd.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass ScatterND final : public RocmKernel {\n public:\n explicit ScatterND(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus ScatterNDImpl(\n cudaStream_t stream,\n void* output_data,\n const size_t element_size,\n const size_t num_indices,\n const int64_t* indices_data,\n const int64_t last_index_dimension,\n const int64_t* element_counts_and_input_dims,\n const void* updates_data,\n const size_t num_updates_elements);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus ScatterNDImpl(\n hipStream_t stream,\n void* output_data,\n const size_t element_size,\n const size_t num_indices,\n const int64_t* indices_data,\n const int64_t last_index_dimension,\n const int64_t* element_counts_and_input_dims,\n const void* updates_data,\n const size_t num_updates_elements);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"sequence_op.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceAt,\n kOnnxDomain,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"I\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SequenceAt);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceConstruct,\n kOnnxDomain,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n SequenceConstruct);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceEmpty,\n kOnnxDomain,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n SequenceEmpty);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceLength,\n kOnnxDomain,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"I\", DataTypeImpl::GetTensorType()),\n SequenceLength);\n\nONNX_OPERATOR_KERNEL_EX(\n ConcatFromSequence,\n kOnnxDomain,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n ConcatFromSequence);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceErase,\n kOnnxDomain,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"I\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SequenceErase);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceInsert,\n kOnnxDomain,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 2)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"I\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SequenceInsert);\n\n} // namespace 
cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"sequence_op.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceAt,\n kOnnxDomain,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"I\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SequenceAt);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceConstruct,\n kOnnxDomain,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n SequenceConstruct);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceEmpty,\n kOnnxDomain,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n SequenceEmpty);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceLength,\n kOnnxDomain,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"I\", DataTypeImpl::GetTensorType()),\n SequenceLength);\n\nONNX_OPERATOR_KERNEL_EX(\n ConcatFromSequence,\n kOnnxDomain,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n ConcatFromSequence);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceErase,\n kOnnxDomain,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"I\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SequenceErase);\n\nONNX_OPERATOR_KERNEL_EX(\n SequenceInsert,\n kOnnxDomain,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 2)\n .TypeConstraint(\"S\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"I\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SequenceInsert);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cpu/tensor/shape_op.h\"\n#include \"core/providers/cuda/cuda_fwd.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 1, 12,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 13, 14,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 15, 18,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\nONNX_OPERATOR_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 19,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypesIRv9())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cpu/tensor/shape_op.h\"\n#include \"core/providers/rocm/rocm_fwd.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 1, 12,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 13, 14,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 15, 18,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\nONNX_OPERATOR_KERNEL_EX(\n Shape,\n kOnnxDomain,\n 19,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypesIRv9())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Shape);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cpu/tensor/size.h\"\n#include \"core/providers/cuda/cuda_fwd.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Size,\n kOnnxDomain,\n 1, 12,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Size);\n\nONNX_OPERATOR_KERNEL_EX(\n Size,\n kOnnxDomain,\n 13,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Size);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cpu/tensor/size.h\"\n#include \"core/providers/rocm/rocm_fwd.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Size,\n kOnnxDomain,\n 1, 12,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Size);\n\nONNX_OPERATOR_KERNEL_EX(\n Size,\n kOnnxDomain,\n 13,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n // properly force CPU/GPU synch inside the kernel\n .OutputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()),\n Size);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/slice.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nnamespace SliceCuda {\n\nStatus Impl(cudaStream_t stream,\n const void* input_data,\n const TensorShape& input_shape,\n void* output_data,\n SliceOp::PrepareForComputeMetadata& prepare_metadata,\n size_t element_size);\n\n} // namespace SliceCuda\n\ntemplate \nclass Slice : public CudaKernel, public SliceBase {\n public:\n Slice(const OpKernelInfo& info) : CudaKernel(info), SliceBase(info, dynamic) {}\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n virtual const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const;\n virtual Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts,\n TensorShapeVector& input_ends, TensorShapeVector& input_axes,\n TensorShapeVector& input_steps) const;\n\n virtual Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray& starts_buffer,\n const TArray& steps_buffer, const TArray& input_strides,\n const TArray& output_strides, OpKernelContext* ctx,\n const TensorShape& output_shape) const;\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/slice.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nnamespace SliceRocm {\n\nStatus Impl(hipStream_t stream,\n const void* input_data,\n const TensorShape& input_shape,\n void* output_data,\n SliceOp::PrepareForComputeMetadata& prepare_metadata,\n size_t element_size);\n\n} // namespace SliceRocm\n\ntemplate \nclass Slice : public RocmKernel, public SliceBase {\n public:\n Slice(const OpKernelInfo& info) : RocmKernel(info), SliceBase(info, dynamic) {}\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n virtual const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const;\n virtual Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts,\n TensorShapeVector& input_ends, TensorShapeVector& input_axes,\n TensorShapeVector& input_steps) const;\n\n virtual Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray& starts_buffer,\n const TArray& steps_buffer, const TArray& input_strides,\n const TArray& output_strides, OpKernelContext* ctx,\n const TensorShape& output_shape) const;\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus SliceImpl(cudaStream_t stream,\n const size_t element_size,\n const int32_t dimension_count,\n const TArray& starts,\n const TArray& steps,\n const TArray& input_strides,\n const TArray& output_strides,\n const void* input_data,\n void* output_data,\n const size_t N);\n\n#ifdef ENABLE_TRAINING_OPS\nStatus SliceImplGrad(cudaStream_t stream,\n const size_t element_size,\n const int32_t dimension_count,\n const TArray& starts,\n const TArray& steps,\n const TArray& input_strides,\n const TArray& output_strides,\n const void* input_data,\n void* output_data,\n const size_t N);\n#endif // ENABLE_TRAINING_OPS\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus SliceImpl(hipStream_t stream,\n const size_t element_size,\n const int32_t dimension_count,\n const TArray& starts,\n const TArray& steps,\n const TArray& input_strides,\n const TArray& output_strides,\n const void* input_data,\n void* output_data,\n const size_t N);\n\n#ifdef ENABLE_TRAINING_OPS\nStatus SliceImplGrad(hipStream_t stream,\n const size_t element_size,\n const int32_t dimension_count,\n const TArray& starts,\n const TArray& steps,\n const TArray& input_strides,\n const TArray& output_strides,\n const void* input_data,\n void* output_data,\n const size_t N);\n#endif // ENABLE_TRAINING_OPS\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#include \"constant_of_shape.h\"\n\nusing namespace ::onnxruntime::common;\nusing namespace ONNX_NAMESPACE;\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n ConstantOfShape,\n kOnnxDomain,\n 9,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType<int64_t>())\n .TypeConstraint(\"T2\", DataTypeImpl::AllFixedSizeTensorTypes()),\n ConstantOfShape);\n\nStatus ConstantOfShape::ComputeInternal(OpKernelContext* ctx) const {\n Tensor* output_tensor = nullptr;\n ORT_RETURN_IF_ERROR(PrepareCompute(ctx, &output_tensor));\n auto output_data = output_tensor->MutableDataRaw();\n const auto size = output_tensor->Shape().Size();\n const void* value_ptr = GetValuePtr();\n const auto element_size = output_tensor->DataType()->Size();\n\n#define CASE(TYPE) \\\n case sizeof(TYPE): \\\n if (size > 0) { \\\n cuda::Fill(Stream(ctx), reinterpret_cast<TYPE*>(output_data), *(reinterpret_cast<const TYPE*>(value_ptr)), size); \\\n } \\\n break;\n\n switch (element_size) {\n CASE(int8_t)\n CASE(int16_t)\n CASE(int32_t)\n CASE(int64_t)\n default:\n ORT_THROW(\"Unsupported value attribute datatype with sizeof=: \", element_size);\n break;\n }\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#include \"constant_of_shape.h\"\n\nusing namespace ::onnxruntime::common;\nusing namespace ONNX_NAMESPACE;\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n ConstantOfShape,\n kOnnxDomain,\n 9,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType<int64_t>())\n .TypeConstraint(\"T2\", DataTypeImpl::AllFixedSizeTensorTypes()),\n ConstantOfShape);\n\nStatus ConstantOfShape::ComputeInternal(OpKernelContext* ctx) const {\n Tensor* output_tensor = nullptr;\n ORT_RETURN_IF_ERROR(PrepareCompute(ctx, &output_tensor));\n auto output_data = output_tensor->MutableDataRaw();\n const auto size = output_tensor->Shape().Size();\n const void* value_ptr = GetValuePtr();\n const auto element_size = output_tensor->DataType()->Size();\n\n#define CASE(TYPE) \\\n case sizeof(TYPE): \\\n if (size > 0) { \\\n rocm::Fill(Stream(ctx), reinterpret_cast<TYPE*>(output_data), *(reinterpret_cast<const TYPE*>(value_ptr)), size); \\\n } \\\n break;\n\n switch (element_size) {\n CASE(int8_t)\n CASE(int16_t)\n CASE(int32_t)\n CASE(int64_t)\n default:\n ORT_THROW(\"Unsupported value attribute datatype with sizeof=: \", element_size);\n break;\n }\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/space_depth_ops.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SpaceToDepth final : public CudaKernel, SpaceDepthBase {\n public:\n explicit SpaceToDepth(const OpKernelInfo& info) : CudaKernel(info), SpaceDepthBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass DepthToSpace final : public CudaKernel, SpaceDepthBase {\n public:\n explicit DepthToSpace(const OpKernelInfo& info) : CudaKernel(info), SpaceDepthBase(info) {\n std::string mode;\n // if mode doesn't exist, then it is the default \"DCR\" mode\n // (or) it is an opset < 11 model for which the only mode is \"DCR\" mode\n if (info.GetAttr(\"mode\", &mode).IsOK()) {\n if (mode == \"CRD\")\n is_dcr_ = false;\n\n else if (mode != \"DCR\")\n ORT_THROW(\"DepthToSpace op: only 'DCR' and 'CRD' modes are supported\");\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool is_dcr_ = true;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/space_depth_ops.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SpaceToDepth final : public RocmKernel, SpaceDepthBase {\n public:\n explicit SpaceToDepth(const OpKernelInfo& info) : RocmKernel(info), SpaceDepthBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass DepthToSpace final : public RocmKernel, SpaceDepthBase {\n public:\n explicit DepthToSpace(const OpKernelInfo& info) : RocmKernel(info), SpaceDepthBase(info) {\n std::string mode;\n // if mode doesn't exist, then it is the default \"DCR\" mode\n // (or) it is an opset < 11 model for which the only mode is \"DCR\" mode\n if (info.GetAttr(\"mode\", &mode).IsOK()) {\n if (mode == \"CRD\")\n is_dcr_ = false;\n\n else if (mode != \"DCR\")\n ORT_THROW(\"DepthToSpace op: only 'DCR' and 'CRD' modes are supported\");\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool is_dcr_ = true;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/split.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SplitKernel : public CudaKernel, public SplitBase {\n public:\n SplitKernel(const OpKernelInfo& info, uint32_t opset) : CudaKernel(info), SplitBase(info, opset) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n// versions 2, 11 and 13\nclass Split_2_13 final : public SplitKernel {\n public:\n // use opset 1 for all versions earlier than 18\n Split_2_13(const OpKernelInfo& info) : SplitKernel(info, /* opset */ 1) {}\n};\n\nclass Split_18 final : public SplitKernel {\n public:\n Split_18(const OpKernelInfo& info) : SplitKernel(info, 18) {}\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/split.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SplitKernel : public RocmKernel, public SplitBase {\n public:\n SplitKernel(const OpKernelInfo& info, uint32_t opset) : RocmKernel(info), SplitBase(info, opset) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n// versions 2, 11 and 13\nclass Split_2_13 final : public SplitKernel {\n public:\n // use opset 1 for all versions earlier than 18\n Split_2_13(const OpKernelInfo& info) : SplitKernel(info, /* opset */ 1) {}\n};\n\nclass Split_18 final : public SplitKernel {\n public:\n Split_18(const OpKernelInfo& info) : SplitKernel(info, 18) {}\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus SplitSameSplitDimImpl(cudaStream_t stream, const size_t element_size, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t split_size, const int num_outputs,\n const void* input_data, OutputDataArray output_data, const size_t input_size);\n\nStatus SplitImpl(cudaStream_t stream, const size_t element_size, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range,\n const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data,\n void** output_data, const size_t input_size);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus SplitSameSplitDimImpl(hipStream_t stream, const size_t element_size, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t split_size, const int num_outputs,\n const void* input_data, OutputDataArray output_data, const size_t input_size);\n\nStatus SplitImpl(hipStream_t stream, const size_t element_size, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t* split_sizes, const int64_t* split_sizes_range,\n const int64_t* axis_dimension_input_output_mapping, const int num_outputs, const void* input_data,\n void** output_data, const size_t input_size);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"squeeze.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Squeeze,\n kOnnxDomain,\n 1, 10,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Squeeze);\n\n// explicit support for negative axis.\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Squeeze,\n kOnnxDomain,\n 11, 12,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Squeeze);\n\n// axes is input instead of attribute\nONNX_OPERATOR_KERNEL_EX(\n Squeeze,\n kOnnxDomain,\n 13,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Squeeze);\n\nStatus Squeeze::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* X = ctx->Input(0);\n const TensorShape& X_shape = X->Shape();\n\n TensorShapeVector axes;\n size_t num_inputs = ctx->InputCount();\n if (num_inputs == 2) { // axes is an input\n const Tensor* axes_tensor = ctx->Input(1);\n ORT_ENFORCE(axes_tensor != nullptr, \"Axes input is null\");\n ORT_ENFORCE(axes_tensor->Shape().NumDimensions() == 1,\n \"An axes tensor must be a vector tensor.\");\n auto nDims = static_cast(axes_tensor->Shape()[0]);\n const auto* data = axes_tensor->Data();\n axes.assign(data, data + nDims);\n } else {\n axes.assign(axes_.begin(), axes_.end());\n }\n\n TensorShapeVector output_shape = ComputeOutputShape(X_shape, axes);\n\n Tensor* Y = ctx->Output(0, TensorShape(output_shape));\n\n const void* input = X->DataRaw();\n void* output = Y->MutableDataRaw();\n if (input == output)\n return Status::OK();\n\n auto count = X->Shape().Size();\n auto element_bytes = X->DataType()->Size();\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(output, input, count * element_bytes, cudaMemcpyDeviceToDevice, Stream(ctx)));\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"squeeze.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Squeeze,\n kOnnxDomain,\n 1, 10,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Squeeze);\n\n// explicit support for negative axis.\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Squeeze,\n kOnnxDomain,\n 11, 12,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Squeeze);\n\n// axes is input instead of attribute\nONNX_OPERATOR_KERNEL_EX(\n Squeeze,\n kOnnxDomain,\n 13,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Squeeze);\n\nStatus Squeeze::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* X = ctx->Input(0);\n const TensorShape& X_shape = X->Shape();\n\n TensorShapeVector axes;\n size_t num_inputs = ctx->InputCount();\n if (num_inputs == 2) { // axes is an input\n const Tensor* axes_tensor = ctx->Input(1);\n ORT_ENFORCE(axes_tensor != nullptr, \"Axes input is null\");\n ORT_ENFORCE(axes_tensor->Shape().NumDimensions() == 1,\n \"An axes tensor must be a vector tensor.\");\n auto nDims = static_cast(axes_tensor->Shape()[0]);\n const auto* data = axes_tensor->Data();\n axes.assign(data, data + nDims);\n } else {\n axes.assign(axes_.begin(), axes_.end());\n }\n\n TensorShapeVector output_shape = ComputeOutputShape(X_shape, axes);\n\n Tensor* Y = ctx->Output(0, TensorShape(output_shape));\n\n const void* input = X->DataRaw();\n void* output = Y->MutableDataRaw();\n if (input == output)\n return Status::OK();\n\n auto count = X->Shape().Size();\n auto element_bytes = X->DataType()->Size();\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(output, input, count * element_bytes, hipMemcpyDeviceToDevice, Stream(ctx)));\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/squeeze.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Squeeze final : public SqueezeBase, public CudaKernel {\n public:\n Squeeze(const OpKernelInfo& info) : SqueezeBase(info), CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/squeeze.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Squeeze final : public SqueezeBase, public RocmKernel {\n public:\n Squeeze(const OpKernelInfo& info) : SqueezeBase(info), RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/tile.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct Tile final : CudaKernel {\n explicit Tile(const OpKernelInfo& info) : CudaKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/tile.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct Tile final : RocmKernel {\n explicit Tile(const OpKernelInfo& info) : RocmKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid TileImpl(cudaStream_t stream, const size_t shape_rank, const TArray& fdm_input_shape,\n const TArray& input_stride, const T* input_data, const TArray& fdm_output_strides,\n T* output_data, const size_t N);\n\ntemplate \nvoid TileMemcpyImpl(cudaStream_t stream, const T* input_data, T* output_data, const size_t num_input_elements,\n const size_t repeats);\n\ntemplate \nvoid TileBatchedMemcpyImpl(cudaStream_t stream, const T* input_data, T* output_data, const size_t size_input_row,\n const size_t num_input_elements, const size_t batch_repeats, const size_t repeats_per_batch);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid TileImpl(hipStream_t stream, const size_t shape_rank, const TArray& fdm_input_shape,\n const TArray& input_stride, const T* input_data, const TArray& fdm_output_strides,\n T* output_data, const size_t N);\n\ntemplate \nvoid TileMemcpyImpl(hipStream_t stream, const T* input_data, T* output_data, const size_t num_input_elements,\n const size_t repeats);\n\ntemplate \nvoid TileBatchedMemcpyImpl(hipStream_t stream, const T* input_data, T* output_data, const size_t size_input_row,\n const size_t num_input_elements, const size_t batch_repeats, const size_t repeats_per_batch);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/common/gsl.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/transpose.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Transpose final : public CudaKernel, public TransposeBase {\n public:\n Transpose(const OpKernelInfo& info) : CudaKernel(info), TransposeBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n static Status DoTranspose(const Transpose& transpose_kernel,\n onnxruntime::Stream* ort_stream,\n const gsl::span& permutations, const Tensor& input, Tensor& output);\n\n // `input_shape_override` (if provided) overrides the shape of `input` for compute purposes\n // `output_shape_override` (if provided) overrides the shape of `output` for compute purposes\n static Status DoTranspose(const cudaDeviceProp& prop,\n cudaStream_t stream,\n const cublasHandle_t cublas_handle,\n const gsl::span& permutations,\n const Tensor& input, Tensor& output,\n const TensorShape* input_shape_override = nullptr,\n const TensorShape* output_shape_override = nullptr);\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/common/gsl.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/transpose.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Transpose final : public RocmKernel, public TransposeBase {\n public:\n Transpose(const OpKernelInfo& info) : RocmKernel(info), TransposeBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n static Status DoTranspose(const Transpose& transpose_kernel,\n onnxruntime::Stream* ort_stream,\n const gsl::span& permutations, const Tensor& input, Tensor& output);\n\n // `input_shape_override` (if provided) overrides the shape of `input` for compute purposes\n // `output_shape_override` (if provided) overrides the shape of `output` for compute purposes\n static Status DoTranspose(const hipDeviceProp_t& prop,\n hipStream_t stream,\n const rocblas_handle rocblas_handle,\n const gsl::span& permutations,\n const Tensor& input, Tensor& output,\n const TensorShape* input_shape_override = nullptr,\n const TensorShape* output_shape_override = nullptr);\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nbool CanDoTranspose3D(const cudaDeviceProp& prop,\n size_t rank, const gsl::span& input_dims, const gsl::span& permutations,\n dim3& grid_size, dim3& block_size);\nStatus Transpose3DImpl(cudaStream_t stream, size_t element_size, const TArray& input_shape, const TArray& input_strides, const void* input_data,\n void* output_data, int64_t N,\n const dim3& grid_size, const dim3& block_size);\n\nbool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const cudaDeviceProp& prop,\n size_t element_size,\n int32_t rank,\n const gsl::span& input_dims,\n const gsl::span& permutations,\n dim3& grid_size, dim3& block_size);\nStatus Transpose4DParallelizeMultipleElementsPerThreadInInnermostDim(cudaStream_t stream,\n size_t element_size, const TArray& input_shape,\n const TArray& input_strides, const void* input_data,\n const TArray& output_strides, void* output_data, int N,\n const dim3& grid_size, const dim3& block_size);\n\nbool CanDoTranspose4DParallelizeOneElementPerThread(const cudaDeviceProp& prop,\n size_t element_size,\n int32_t rank,\n const gsl::span& input_dims,\n const gsl::span& permutations,\n dim3& grid_size, dim3& block_size);\nStatus Transpose4DParallelizeOneElementPerThread(cudaStream_t stream,\n size_t element_size, const TArray& input_shape,\n const TArray& input_strides, const void* input_data,\n const TArray& output_strides, void* output_data, int N,\n const dim3& grid_size, const dim3& block_size);\n\nStatus TransposeImpl(cudaStream_t stream, size_t element_size, int32_t shape_rank, const TArray& input_strides,\n const void* input_data, const TArray& fdm_output_strides, void* output_data, int N);\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nbool CanDoTranspose3D(const hipDeviceProp_t& prop,\n size_t rank, const gsl::span& input_dims, const gsl::span& permutations,\n dim3& grid_size, dim3& block_size);\nStatus Transpose3DImpl(hipStream_t stream, size_t element_size, const TArray& input_shape, const TArray& input_strides, const void* input_data,\n void* output_data, int64_t N,\n const dim3& grid_size, const dim3& block_size);\n\nbool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const hipDeviceProp_t& prop,\n size_t element_size,\n int32_t rank,\n const gsl::span& input_dims,\n const gsl::span& permutations,\n dim3& grid_size, dim3& block_size);\nStatus Transpose4DParallelizeMultipleElementsPerThreadInInnermostDim(hipStream_t stream,\n size_t element_size, const TArray& input_shape,\n const TArray& input_strides, const void* input_data,\n const TArray& output_strides, void* output_data, int N,\n const dim3& grid_size, const dim3& block_size);\n\nbool CanDoTranspose4DParallelizeOneElementPerThread(const hipDeviceProp_t& prop,\n size_t element_size,\n int32_t rank,\n const gsl::span& input_dims,\n const gsl::span& permutations,\n dim3& grid_size, dim3& block_size);\nStatus Transpose4DParallelizeOneElementPerThread(hipStream_t stream,\n size_t element_size, const TArray& input_shape,\n const TArray& input_strides, const void* input_data,\n const TArray& output_strides, void* output_data, int N,\n const dim3& grid_size, const dim3& block_size);\n\nStatus TransposeImpl(hipStream_t stream, size_t element_size, int32_t shape_rank, const TArray& input_strides,\n const void* input_data, const TArray& fdm_output_strides, void* output_data, int N);\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/tensor/trilu.h\"\n#include \"core/providers/cuda/tensor/trilu_impl.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n\nusing namespace onnxruntime::common;\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n Trilu,\n kOnnxDomain,\n 14,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .MayInplace(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Trilu);\n\nStatus Trilu::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* input_ptr = ctx->Input(0);\n const auto* k = ctx->Input(1);\n\n int64_t k_val = 0;\n if (k) {\n ORT_ENFORCE(IsScalarOr1ElementVector(k), \"k should be a 1-D or 0-D tensor.\");\n k_val = *(k->Data());\n }\n if (input_ptr == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, \"input count mismatch\");\n const Tensor& input = *input_ptr;\n const auto& shape = input.Shape();\n const auto& input_dims = shape.GetDims();\n auto rank = input_dims.size();\n if (rank < 2) {\n return Status(ONNXRUNTIME, INVALID_ARGUMENT, \"Input tensor should have a rank of at least 2\");\n }\n Tensor* output = ctx->Output(0, shape);\n auto matrix_size = input_dims[rank - 1] * input_dims[rank - 2];\n if (matrix_size == 0) {\n return Status::OK();\n }\n const fast_divmod row_col_divmod_indices(gsl::narrow_cast(input_dims[rank - 1]));\n const fast_divmod batch_divmod_indices(gsl::narrow_cast(matrix_size));\n\n size_t element_size = input.DataType()->Size();\n return TriluImpl(\n this->Stream(ctx),\n upper_,\n element_size,\n k_val,\n input.DataRaw(),\n output->MutableDataRaw(),\n gsl::narrow(shape.Size()),\n batch_divmod_indices,\n row_col_divmod_indices);\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/tensor/trilu.h\"\n#include \"core/providers/rocm/tensor/trilu_impl.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n\nusing namespace onnxruntime::common;\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n Trilu,\n kOnnxDomain,\n 14,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .MayInplace(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Trilu);\n\nStatus Trilu::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* input_ptr = ctx->Input(0);\n const auto* k = ctx->Input(1);\n\n int64_t k_val = 0;\n if (k) {\n ORT_ENFORCE(IsScalarOr1ElementVector(k), \"k should be a 1-D or 0-D tensor.\");\n k_val = *(k->Data());\n }\n if (input_ptr == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, \"input count mismatch\");\n const Tensor& input = *input_ptr;\n const auto& shape = input.Shape();\n const auto& input_dims = shape.GetDims();\n auto rank = input_dims.size();\n if (rank < 2) {\n return Status(ONNXRUNTIME, INVALID_ARGUMENT, \"Input tensor should have a rank of at least 2\");\n }\n Tensor* output = ctx->Output(0, shape);\n auto matrix_size = input_dims[rank - 1] * input_dims[rank - 2];\n if (matrix_size == 0) {\n return Status::OK();\n }\n const fast_divmod row_col_divmod_indices(gsl::narrow_cast(input_dims[rank - 1]));\n const fast_divmod batch_divmod_indices(gsl::narrow_cast(matrix_size));\n\n size_t element_size = input.DataType()->Size();\n return TriluImpl(\n this->Stream(ctx),\n upper_,\n element_size,\n k_val,\n input.DataRaw(),\n output->MutableDataRaw(),\n gsl::narrow(shape.Size()),\n batch_divmod_indices,\n row_col_divmod_indices);\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/generator/constant_of_shape_base.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass ConstantOfShape final : public ConstantOfShapeBase<>, public CudaKernel {\n public:\n explicit ConstantOfShape(const OpKernelInfo& info) : ConstantOfShapeBase(info), CudaKernel(info) {}\n\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ConstantOfShape);\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/generator/constant_of_shape_base.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass ConstantOfShape final : public ConstantOfShapeBase<>, public RocmKernel {\n public:\n explicit ConstantOfShape(const OpKernelInfo& info) : ConstantOfShapeBase(info), RocmKernel(info) {}\n\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ConstantOfShape);\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Trilu final : public CudaKernel {\n public:\n Trilu(const OpKernelInfo& info) : CudaKernel(info), upper_(info.GetAttrOrDefault(\"upper\", 1) >= 1) {\n }\n ~Trilu() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool upper_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Trilu final : public RocmKernel {\n public:\n Trilu(const OpKernelInfo& info) : RocmKernel(info), upper_(info.GetAttrOrDefault(\"upper\", 1) >= 1) {\n }\n ~Trilu() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool upper_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus TriluImpl(\n cudaStream_t stream,\n bool upper,\n size_t element_size,\n int64_t k,\n const void* input_data,\n void* output_data,\n int N,\n const fast_divmod& batch_divmod_indices,\n const fast_divmod& row_col_divmod_indices);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus TriluImpl(\n hipStream_t stream,\n bool upper,\n size_t element_size,\n int64_t k,\n const void* input_data,\n void* output_data,\n int N,\n const fast_divmod& batch_divmod_indices,\n const fast_divmod& row_col_divmod_indices);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/tensor/unsqueeze.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Unsqueeze,\n kOnnxDomain,\n 1, 10,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Unsqueeze);\n\n// explicitly support negative axis\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Unsqueeze,\n kOnnxDomain,\n 11, 12,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Unsqueeze);\n\n// axes is input instead of attribute, support bfloat16\nONNX_OPERATOR_KERNEL_EX(\n Unsqueeze,\n kOnnxDomain,\n 13,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Unsqueeze);\n\nStatus Unsqueeze::ComputeInternal(OpKernelContext* ctx) const {\n Prepare p;\n ORT_RETURN_IF_ERROR(PrepareCompute(ctx, p));\n\n const void* input = p.input_tensor->DataRaw();\n void* output = p.output_tensor->MutableDataRaw();\n if (input == output)\n return Status::OK();\n\n auto count = p.input_tensor->Shape().Size();\n auto element_bytes = p.input_tensor->DataType()->Size();\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(output, input, count * element_bytes, cudaMemcpyDeviceToDevice, Stream(ctx)));\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/tensor/unsqueeze.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Unsqueeze,\n kOnnxDomain,\n 1, 10,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Unsqueeze);\n\n// explicitly support negative axis\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Unsqueeze,\n kOnnxDomain,\n 11, 12,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Unsqueeze);\n\n// axes is input instead of attribute, support bfloat16\nONNX_OPERATOR_KERNEL_EX(\n Unsqueeze,\n kOnnxDomain,\n 13,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Unsqueeze);\n\nStatus Unsqueeze::ComputeInternal(OpKernelContext* ctx) const {\n Prepare p;\n ORT_RETURN_IF_ERROR(PrepareCompute(ctx, p));\n\n const void* input = p.input_tensor->DataRaw();\n void* output = p.output_tensor->MutableDataRaw();\n if (input == output)\n return Status::OK();\n\n auto count = p.input_tensor->Shape().Size();\n auto element_bytes = p.input_tensor->DataType()->Size();\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(output, input, count * element_bytes, hipMemcpyDeviceToDevice, Stream(ctx)));\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/unsqueeze.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Unsqueeze final : public UnsqueezeBase, public CudaKernel {\n public:\n Unsqueeze(const OpKernelInfo& info) : UnsqueezeBase(info), CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/unsqueeze.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Unsqueeze final : public UnsqueezeBase, public RocmKernel {\n public:\n Unsqueeze(const OpKernelInfo& info) : UnsqueezeBase(info), RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/upsamplebase.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Upsample : public UpsampleBase, public CudaKernel {\n public:\n Upsample(const OpKernelInfo& info) : UpsampleBase(info), CudaKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n Status BaseCompute(OpKernelContext* context, const std::vector& roi, const std::vector& scales,\n const gsl::span& output_dims) const;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/upsamplebase.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Upsample : public UpsampleBase, public RocmKernel {\n public:\n Upsample(const OpKernelInfo& info) : UpsampleBase(info), RocmKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n Status BaseCompute(OpKernelContext* context, const std::vector& roi, const std::vector& scales,\n const gsl::span& output_dims) const;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/cpu/tensor/upsamplebase.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid UpampleImpl(cudaStream_t stream,\n const onnxruntime::UpsampleMode upsample_mode,\n const size_t rank,\n const int64_t input_dim2,\n const TArray& input_pitches,\n const TArray& output_div_pitches,\n const TArray& scales_div,\n const T* input_data,\n T* output_data,\n const size_t N);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/cpu/tensor/upsamplebase.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid UpampleImpl(hipStream_t stream,\n const onnxruntime::UpsampleMode upsample_mode,\n const size_t rank,\n const int64_t input_dim2,\n const TArray& input_pitches,\n const TArray& output_div_pitches,\n const TArray& scales_div,\n const T* input_data,\n T* output_data,\n const size_t N);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Where final : public CudaKernel {\n public:\n Where(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Where final : public RocmKernel {\n public:\n Where(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid WhereImpl(\n cudaStream_t stream,\n size_t output_rank_or_simple_broadcast,\n BroadcastIndexType cond_index_type,\n const TArray& cond_padded_strides,\n const bool* cond_data,\n BroadcastIndexType x_index_type,\n const TArray& x_padded_strides,\n const T* x_data,\n BroadcastIndexType y_index_type,\n const TArray& y_padded_strides,\n const T* y_data,\n const TArray& fdm_output_strides,\n T* output_data,\n size_t count);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid WhereImpl(\n hipStream_t stream,\n size_t output_rank_or_simple_broadcast,\n BroadcastIndexType cond_index_type,\n const TArray& cond_padded_strides,\n const bool* cond_data,\n BroadcastIndexType x_index_type,\n const TArray& x_padded_strides,\n const T* x_data,\n BroadcastIndexType y_index_type,\n const TArray& y_padded_strides,\n const T* y_data,\n const TArray& fdm_output_strides,\n T* output_data,\n size_t count);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#ifndef NDEBUG\nnamespace onnxruntime {\nnamespace cuda {\nnamespace test {\n\n// Test header provides function declarations in EP-side bridge.\nbool TestDeferredRelease();\nbool TestDeferredReleaseWithoutArena();\nbool TestBeamSearchTopK();\nbool TestGreedySearchTopOne();\n\n} // namespace test\n} // namespace cuda\n} // namespace onnxruntime\n#endif\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#ifndef NDEBUG\nnamespace onnxruntime {\nnamespace rocm {\nnamespace test {\n\n// Test header provides function declarations in EP-side bridge.\nbool TestDeferredRelease();\nbool TestDeferredReleaseWithoutArena();\nbool TestBeamSearchTopK();\nbool TestGreedySearchTopOne();\n\n} // namespace test\n} // namespace rocm\n} // namespace onnxruntime\n#endif\n###" }, { "cuda": "\n\n\n\n\n\n\n#ifndef NDEBUG\n#include \n#include \"core/providers/cuda/test/all_tests.h\"\n#include \"core/providers/cuda/cuda_execution_provider.h\"\n#include \"core/providers/cuda/cuda_allocator.h\"\n#include \"core/providers/cuda/cuda_stream_handle.h\"\nnamespace onnxruntime {\nnamespace cuda {\nnamespace test {\n\n\nbool TestDeferredRelease() {\n \n CUDAExecutionProviderInfo info;\n CUDAExecutionProvider ep(info);\n \n onnxruntime::AllocatorManager allocator_manager;\n ep.RegisterAllocator(allocator_manager);\n AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);\n \n \n AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);\n \n \n CudaStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);\n \n const size_t n_bytes = 10 * 1000000;\n const int64_t n_allocs = 64;\n ORT_THROW_IF_ERROR(ep.OnRunStart());\n for (size_t i = 0; i < n_allocs; ++i) {\n \n auto pinned_buffer = ep.AllocateBufferOnCPUPinned(n_bytes);\n \n stream.EnqueDeferredCPUBuffer(pinned_buffer.release());\n }\n \n AllocatorStats stats;\n cpu_pinned_alloc->GetStats(&stats);\n ORT_ENFORCE(stats.num_allocs == n_allocs);\n ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());\n ORT_THROW_IF_ERROR(ep.OnRunEnd(true));\n return true;\n}\nbool TestDeferredReleaseWithoutArena() {\n \n CUDAExecutionProviderInfo info;\n CUDAExecutionProvider ep(info);\n \n onnxruntime::AllocatorManager allocator_manager;\n OrtDevice pinned_device{OrtDevice::CPU, OrtDevice::MemType::CUDA_PINNED, DEFAULT_CPU_ALLOCATOR_DEVICE_ID};\n \n AllocatorCreationInfo pinned_memory_info(\n [](OrtDevice::DeviceId device_id) {\n return std::make_unique(device_id, CUDA_PINNED);\n }, pinned_device.Id(), false );\n auto cuda_pinned_alloc = CreateAllocator(pinned_memory_info);\n allocator_manager.InsertAllocator(cuda_pinned_alloc);\n \n \n ep.RegisterAllocator(allocator_manager);\n AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);\n \n \n AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);\n \n \n CudaStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);\n \n const size_t n_bytes = 10 * 1000000;\n const int64_t n_allocs = 64;\n ORT_THROW_IF_ERROR(ep.OnRunStart());\n for (size_t i = 0; i < n_allocs; ++i) {\n \n auto pinned_buffer = ep.AllocateBufferOnCPUPinned(n_bytes);\n \n stream.EnqueDeferredCPUBuffer(pinned_buffer.release());\n }\n ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());\n ORT_THROW_IF_ERROR(ep.OnRunEnd(true));\n return true;\n}\n} \n} \n} \n#endif\n\n###", "hip": " \n\n\n\n\n\n#ifndef NDEBUG\n#include \n#include 
\"core/providers/rocm/test/all_tests.h\"\n#include \"core/providers/rocm/rocm_execution_provider.h\"\n#include \"core/providers/rocm/rocm_allocator.h\"\n#include \"core/providers/rocm/rocm_stream_handle.h\"\nnamespace onnxruntime {\nnamespace rocm {\nnamespace test {\n\n\nbool TestDeferredRelease() {\n \n ROCMExecutionProviderInfo info;\n ROCMExecutionProvider ep(info);\n \n onnxruntime::AllocatorManager allocator_manager;\n ep.RegisterAllocator(allocator_manager);\n AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);\n \n \n AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);\n \n \n RocmStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);\n \n const size_t n_bytes = 10 * 1000000;\n const int64_t n_allocs = 64;\n ORT_THROW_IF_ERROR(ep.OnRunStart());\n for (size_t i = 0; i < n_allocs; ++i) {\n \n auto pinned_buffer = ep.AllocateBufferOnCPUPinned(n_bytes);\n \n stream.EnqueDeferredCPUBuffer(pinned_buffer.release());\n }\n \n AllocatorStats stats;\n cpu_pinned_alloc->GetStats(&stats);\n ORT_ENFORCE(stats.num_allocs == n_allocs);\n ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());\n ORT_THROW_IF_ERROR(ep.OnRunEnd(true));\n return true;\n}\nbool TestDeferredReleaseWithoutArena() {\n \n ROCMExecutionProviderInfo info;\n ROCMExecutionProvider ep(info);\n \n onnxruntime::AllocatorManager allocator_manager;\n OrtDevice pinned_device{OrtDevice::CPU, OrtDevice::MemType::CUDA_PINNED, DEFAULT_CPU_ALLOCATOR_DEVICE_ID};\n \n AllocatorCreationInfo pinned_memory_info(\n [](OrtDevice::DeviceId device_id) {\n return std::make_unique(device_id, CUDA_PINNED);\n }, pinned_device.Id(), false );\n auto rocm_pinned_alloc = CreateAllocator(pinned_memory_info);\n allocator_manager.InsertAllocator(rocm_pinned_alloc);\n \n \n ep.RegisterAllocator(allocator_manager);\n AllocatorPtr gpu_alloctor = ep.GetAllocator(OrtMemType::OrtMemTypeDefault);\n \n \n AllocatorPtr cpu_pinned_alloc = ep.GetAllocator(OrtMemTypeCPU);\n \n \n RocmStream stream(nullptr, gpu_alloctor->Info().device, cpu_pinned_alloc, false, true, nullptr, nullptr);\n \n const size_t n_bytes = 10 * 1000000;\n const int64_t n_allocs = 64;\n ORT_THROW_IF_ERROR(ep.OnRunStart());\n for (size_t i = 0; i < n_allocs; ++i) {\n \n auto pinned_buffer = ep.AllocateBufferOnCPUPinned(n_bytes);\n \n stream.EnqueDeferredCPUBuffer(pinned_buffer.release());\n }\n ORT_THROW_IF_ERROR(stream.CleanUpOnRunEnd());\n ORT_THROW_IF_ERROR(ep.OnRunEnd(true));\n return true;\n}\n} \n} \n} \n#endif###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/random_generator.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#define RANDOM_KERNEL_DECLARE(name) \\\n template \\\n void name##KernelImpl(const cudaDeviceProp& prop, cudaStream_t stream, const int64_t N, const float alpha, \\\n const float beta, PhiloxGenerator& generator, T* Y_data);\n\nRANDOM_KERNEL_DECLARE(RandomNormal)\nRANDOM_KERNEL_DECLARE(RandomUniform)\n\n#undef RANDOM_KERNEL_DECLARE\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/random_generator.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#define RANDOM_KERNEL_DECLARE(name) \\\n template \\\n void name##KernelImpl(const hipDeviceProp_t& prop, hipStream_t stream, const int64_t N, const float alpha, \\\n const float beta, PhiloxGenerator& generator, T* Y_data);\n\nRANDOM_KERNEL_DECLARE(RandomNormal)\nRANDOM_KERNEL_DECLARE(RandomUniform)\n\n#undef RANDOM_KERNEL_DECLARE\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct TritonKernelMetaData {\n int num_warps;\n int shared_mem_size;\n CUfunction func;\n std::unordered_map constants;\n std::string name;\n};\n\nnamespace {\n\ntemplate \nstruct DataTypeToName;\n\n#define DTYPE_TO_STR(type, name) \\\n template <> \\\n struct DataTypeToName { \\\n constexpr static const char* value = name; \\\n };\n\nDTYPE_TO_STR(float, \"fp32\");\nDTYPE_TO_STR(half, \"fp16\");\nDTYPE_TO_STR(double, \"fp64\");\nDTYPE_TO_STR(BFloat16, \"bf16\");\n\n} // end of namespace\n\ntemplate \nconst std::string GetDataTypeName() {\n return DataTypeToName::value;\n}\n\nvoid LoadOrtTritonKernel();\n\nStatus LaunchTritonKernel(cudaStream_t stream, std::string fname, int grid0, int grid1, int grid2, void* args, size_t args_size);\n\nconst TritonKernelMetaData* GetOrtTritonKernelMetadata(size_t idx);\n\nconst std::vector* GetOrtTritonKernelByGroup(std::string group_name);\n\nStatus LaunchTritonKernel(cudaStream_t stream, size_t idx, int grid0, int grid1, int grid2, void* args, size_t args_size);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct TritonKernelMetaData {\n int num_warps;\n int shared_mem_size;\n hipFunction_t func;\n std::unordered_map constants;\n std::string name;\n};\n\nnamespace {\n\ntemplate \nstruct DataTypeToName;\n\n#define DTYPE_TO_STR(type, name) \\\n template <> \\\n struct DataTypeToName { \\\n constexpr static const char* value = name; \\\n };\n\nDTYPE_TO_STR(float, \"fp32\");\nDTYPE_TO_STR(half, \"fp16\");\nDTYPE_TO_STR(double, \"fp64\");\nDTYPE_TO_STR(BFloat16, \"bf16\");\n\n} // end of namespace\n\ntemplate \nconst std::string GetDataTypeName() {\n return DataTypeToName::value;\n}\n\nvoid LoadOrtTritonKernel();\n\nStatus LaunchTritonKernel(hipStream_t stream, std::string fname, int grid0, int grid1, int grid2, void* args, size_t args_size);\n\nconst TritonKernelMetaData* GetOrtTritonKernelMetadata(size_t idx);\n\nconst std::vector* GetOrtTritonKernelByGroup(std::string group_name);\n\nStatus LaunchTritonKernel(hipStream_t stream, size_t idx, int grid0, int grid1, int grid2, void* args, size_t args_size);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"activations.h\"\n#include \"core/framework/op_kernel.h\"\n\nusing namespace onnxruntime::cuda;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n#define REGISTER_ACTIVATION_KERNEL(x, ver, domain, T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n x, \\\n domain, \\\n ver, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .MayInplace(0, 0), \\\n x);\n\n#define UNARY_ACTIVATION_COMPUTE(x, T) \\\n template <> \\\n Status x::ComputeInternal(OpKernelContext* context) const { \\\n UnaryElementwisePreparation p; \\\n ORT_RETURN_IF_ERROR(UnaryElementwise::Prepare(context, &p)); \\\n Ctx##x func_ctx = MakeFuncCtx(); \\\n Impl_##x::MappedType>( \\\n Stream(context), \\\n reinterpret_cast::MappedType*>(p.input_tensor->Data()), \\\n reinterpret_cast::MappedType*>(p.output_tensor->MutableData()), \\\n &func_ctx, p.output_tensor->Shape().Size()); \\\n \\\n return Status::OK(); \\\n }\n\n#define UNARY_ACTIVATION_OP_TYPED(name, ver, domain, T) \\\n REGISTER_ACTIVATION_KERNEL(name, ver, domain, T) \\\n UNARY_ACTIVATION_COMPUTE(name, T)\n\n#define UNARY_ACTIVATION_OP_HFD(name, ver, domain) \\\n UNARY_ACTIVATION_OP_TYPED(name, ver, domain, MLFloat16) \\\n UNARY_ACTIVATION_OP_TYPED(name, ver, domain, float) \\\n UNARY_ACTIVATION_OP_TYPED(name, ver, domain, double)\n\nUNARY_ACTIVATION_OP_HFD(Affine, 1, kOnnxDomain);\nUNARY_ACTIVATION_OP_HFD(ParametricSoftplus, 1, kOnnxDomain);\nUNARY_ACTIVATION_OP_HFD(ScaledTanh, 1, kOnnxDomain);\nUNARY_ACTIVATION_OP_HFD(Gelu, 1, kMSDomain);\nUNARY_ACTIVATION_OP_HFD(QuickGelu, 1, kMSDomain);\n\nREGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, MLFloat16)\nREGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, float)\nREGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, double)\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"activations.h\"\n#include \"core/framework/op_kernel.h\"\n\nusing namespace onnxruntime::rocm;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n#define REGISTER_ACTIVATION_KERNEL(x, ver, domain, T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n x, \\\n domain, \\\n ver, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .MayInplace(0, 0), \\\n x);\n\n#define UNARY_ACTIVATION_COMPUTE(x, T) \\\n template <> \\\n Status x::ComputeInternal(OpKernelContext* context) const { \\\n UnaryElementwisePreparation p; \\\n ORT_RETURN_IF_ERROR(UnaryElementwise::Prepare(context, &p)); \\\n Ctx##x func_ctx = MakeFuncCtx(); \\\n Impl_##x::MappedType>( \\\n Stream(context), \\\n reinterpret_cast::MappedType*>(p.input_tensor->Data()), \\\n reinterpret_cast::MappedType*>(p.output_tensor->MutableData()), \\\n &func_ctx, p.output_tensor->Shape().Size()); \\\n \\\n return Status::OK(); \\\n }\n\n#define UNARY_ACTIVATION_OP_TYPED(name, ver, domain, T) \\\n REGISTER_ACTIVATION_KERNEL(name, ver, domain, T) \\\n UNARY_ACTIVATION_COMPUTE(name, T)\n\n#define UNARY_ACTIVATION_OP_HFD(name, ver, domain) \\\n UNARY_ACTIVATION_OP_TYPED(name, ver, domain, MLFloat16) \\\n UNARY_ACTIVATION_OP_TYPED(name, ver, domain, float) \\\n UNARY_ACTIVATION_OP_TYPED(name, ver, domain, double)\n\nUNARY_ACTIVATION_OP_HFD(Affine, 1, kOnnxDomain);\nUNARY_ACTIVATION_OP_HFD(ParametricSoftplus, 1, kOnnxDomain);\nUNARY_ACTIVATION_OP_HFD(ScaledTanh, 1, kOnnxDomain);\nUNARY_ACTIVATION_OP_HFD(Gelu, 1, kMSDomain);\nUNARY_ACTIVATION_OP_HFD(QuickGelu, 1, kMSDomain);\n\nREGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, MLFloat16)\nREGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, float)\nREGISTER_ACTIVATION_KERNEL(ThresholdedRelu, 1, kOnnxDomain, double)\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/math/unary_elementwise_ops.h\"\n#include \"core/providers/cuda/math/binary_elementwise_ops.h\"\n#include \"core/providers/cuda/activation/activations.h\"\n#include \"activations_impl.h\"\n\nusing namespace onnxruntime::cuda;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nclass Affine final : public UnaryElementwise {\n public:\n Affine(const OpKernelInfo& info) : UnaryElementwise(info) {\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA_BETA()\n\n float alpha_;\n float beta_;\n};\n\ntemplate \nclass ParametricSoftplus final : public UnaryElementwise {\n public:\n ParametricSoftplus(const OpKernelInfo& info) : UnaryElementwise(info) {\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA_BETA()\n\n float alpha_;\n float beta_;\n};\n\ntemplate \nclass ScaledTanh final : public UnaryElementwise {\n public:\n ScaledTanh(const OpKernelInfo& info) : UnaryElementwise(info) {\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA_BETA()\n\n float alpha_;\n float beta_;\n};\n\ntemplate \nclass Gelu final : public UnaryElementwise {\n public:\n Gelu(const OpKernelInfo& info) : UnaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass QuickGelu final : public UnaryElementwise {\n public:\n QuickGelu(const OpKernelInfo& info) : UnaryElementwise(info) {\n alpha_ = info.GetAttrOrDefault(\"alpha\", 1.702f);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA()\n float alpha_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/math/unary_elementwise_ops.h\"\n#include \"core/providers/rocm/math/binary_elementwise_ops.h\"\n#include \"core/providers/rocm/activation/activations.h\"\n#include \"activations_impl.h\"\n\nusing namespace onnxruntime::rocm;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nclass Affine final : public UnaryElementwise {\n public:\n Affine(const OpKernelInfo& info) : UnaryElementwise(info) {\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA_BETA()\n\n float alpha_;\n float beta_;\n};\n\ntemplate \nclass ParametricSoftplus final : public UnaryElementwise {\n public:\n ParametricSoftplus(const OpKernelInfo& info) : UnaryElementwise(info) {\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA_BETA()\n\n float alpha_;\n float beta_;\n};\n\ntemplate \nclass ScaledTanh final : public UnaryElementwise {\n public:\n ScaledTanh(const OpKernelInfo& info) : UnaryElementwise(info) {\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA_BETA()\n\n float alpha_;\n float beta_;\n};\n\ntemplate \nclass Gelu final : public UnaryElementwise {\n public:\n Gelu(const OpKernelInfo& info) : UnaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass QuickGelu final : public UnaryElementwise {\n public:\n QuickGelu(const OpKernelInfo& info) : UnaryElementwise(info) {\n alpha_ = info.GetAttrOrDefault(\"alpha\", 1.702f);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA()\n float alpha_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \n#include \"activations_impl.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"core/providers/cuda/cu_inc/unary_elementwise_impl.cuh\"\nusing namespace onnxruntime::cuda;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\ntemplate \nstruct OP_Affine : public CtxAffine {\n __device__ __inline__ T operator()(const T& a) const {\n return a * (T)alpha + (T)beta;\n }\n};\ntemplate \nstruct OP_ParametricSoftplus : public CtxParametricSoftplus {\n __device__ __inline__ T operator()(const T& a) const {\n if (a > (T)0)\n return (T)alpha * (a * (T)beta + _Log(_Exp(-a * (T)beta) + (T)1));\n else\n return (T)alpha * _Log(_Exp(a * (T)beta) + (T)1);\n }\n};\ntemplate \nstruct OP_ScaledTanh : public CtxScaledTanh {\n __device__ __inline__ T operator()(const T& a) const {\n return (T)alpha * _Tanh(a * (T)beta);\n }\n};\ntemplate \nstruct OP_Gelu : public CtxGelu {\n __device__ __inline__ T operator()(const T& a) const {\n return _Gelu(a);\n }\n};\ntemplate <>\nstruct OP_Gelu : public CtxGelu {\n __device__ __inline__ half operator()(const half& a) const {\n return static_cast(_Gelu(static_cast(a)));\n }\n};\ntemplate \nstruct OP_QuickGelu : public CtxQuickGelu {\n __device__ 
__inline__ T operator()(const T& a) const {\n T v = a * static_cast(alpha);\n T one = static_cast(1.f);\n T zero = static_cast(0.f);\n T sigmoid = v >= zero ? one / (one + _Exp(-v)) : one - one / (one + _Exp(v));\n return a * sigmoid;\n }\n};\n#define UNARY_ACTIVATION_IMPL(name) UNARY_ACTIVATION_IMPL_DECLARATION(name) { UnaryElementWiseImpl(stream, input_data, output_data, *reinterpret_cast*>(func_ctx), count); }\n#define SPECIALIZED_UNARY_ACTIVATION_IMPL(name, T) template void Impl_##name(cudaStream_t stream, const T* input_data, T* output_data, const Ctx##name* func_ctx, size_t count);\n#define SPECIALIZED_UNARY_ACTIVATIONL_HFD(name) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, half) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, float) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, double)\n#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL(name); SPECIALIZED_UNARY_ACTIVATIONL_HFD(name)\nUNARY_CONTRIB_ACTIVATION_OPS()\n#undef UNARY_ACTIVATION_OP_NAME\n} \n} \n} \n\n###", "hip": " \n\n#include \n#include \"activations_impl.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"core/providers/rocm/cu_inc/unary_elementwise_impl.cuh\"\nusing namespace onnxruntime::rocm;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\ntemplate \nstruct OP_Affine : public CtxAffine {\n __device__ __inline__ T operator()(const T& a) const {\n return a * (T)alpha + (T)beta;\n }\n};\ntemplate \nstruct OP_ParametricSoftplus : public CtxParametricSoftplus {\n __device__ __inline__ T operator()(const T& a) const {\n if (a > (T)0)\n return (T)alpha * (a * (T)beta + _Log(_Exp(-a * (T)beta) + (T)1));\n else\n return (T)alpha * _Log(_Exp(a * (T)beta) + (T)1);\n }\n};\ntemplate \nstruct OP_ScaledTanh : public CtxScaledTanh {\n __device__ __inline__ T operator()(const T& a) const {\n return (T)alpha * _Tanh(a * (T)beta);\n }\n};\ntemplate \nstruct OP_Gelu : public CtxGelu {\n __device__ __inline__ T operator()(const T& a) const {\n return _Gelu(a);\n }\n};\ntemplate <>\nstruct OP_Gelu : public CtxGelu {\n __device__ __inline__ half operator()(const half& a) const {\n return static_cast(_Gelu(static_cast(a)));\n }\n};\ntemplate \nstruct OP_QuickGelu : public CtxQuickGelu {\n __device__ __inline__ T operator()(const T& a) const {\n T v = a * static_cast(alpha);\n T one = static_cast(1.f);\n T zero = static_cast(0.f);\n T sigmoid = v >= zero ? one / (one + _Exp(-v)) : one - one / (one + _Exp(v));\n return a * sigmoid;\n }\n};\n#define UNARY_ACTIVATION_IMPL(name) UNARY_ACTIVATION_IMPL_DECLARATION(name) { UnaryElementWiseImpl(stream, input_data, output_data, *reinterpret_cast*>(func_ctx), count); }\n#define SPECIALIZED_UNARY_ACTIVATION_IMPL(name, T) template void Impl_##name(hipStream_t stream, const T* input_data, T* output_data, const Ctx##name* func_ctx, size_t count);\n#define SPECIALIZED_UNARY_ACTIVATIONL_HFD(name) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, half) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, float) SPECIALIZED_UNARY_ACTIVATION_IMPL(name, double)\n#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL(name); SPECIALIZED_UNARY_ACTIVATIONL_HFD(name)\nUNARY_CONTRIB_ACTIVATION_OPS()\n#undef UNARY_ACTIVATION_OP_NAME\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/activation/activations_impl.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntypedef onnxruntime::cuda::CtxAlphaBeta CtxAffine;\ntypedef onnxruntime::cuda::CtxAlphaBeta CtxParametricSoftplus;\ntypedef onnxruntime::cuda::CtxAlphaBeta CtxScaledTanh;\ntypedef onnxruntime::cuda::CtxNull CtxGelu;\ntypedef onnxruntime::cuda::CtxAlpha CtxQuickGelu;\n\n#define UNARY_CONTRIB_ACTIVATION_OPS() \\\n UNARY_ACTIVATION_OP_NAME(ScaledTanh) \\\n UNARY_ACTIVATION_OP_NAME(Affine) \\\n UNARY_ACTIVATION_OP_NAME(ParametricSoftplus) \\\n UNARY_ACTIVATION_OP_NAME(Gelu) \\\n UNARY_ACTIVATION_OP_NAME(QuickGelu)\n\n#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name);\nUNARY_CONTRIB_ACTIVATION_OPS()\n#undef UNARY_ACTIVATION_OP_NAME\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/activation/activations_impl.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntypedef onnxruntime::rocm::CtxAlphaBeta CtxAffine;\ntypedef onnxruntime::rocm::CtxAlphaBeta CtxParametricSoftplus;\ntypedef onnxruntime::rocm::CtxAlphaBeta CtxScaledTanh;\ntypedef onnxruntime::rocm::CtxNull CtxGelu;\ntypedef onnxruntime::rocm::CtxAlpha CtxQuickGelu;\n\n#define UNARY_CONTRIB_ACTIVATION_OPS() \\\n UNARY_ACTIVATION_OP_NAME(ScaledTanh) \\\n UNARY_ACTIVATION_OP_NAME(Affine) \\\n UNARY_ACTIVATION_OP_NAME(ParametricSoftplus) \\\n UNARY_ACTIVATION_OP_NAME(Gelu) \\\n UNARY_ACTIVATION_OP_NAME(QuickGelu)\n\n#define UNARY_ACTIVATION_OP_NAME(name) UNARY_ACTIVATION_IMPL_DECLARATION(name);\nUNARY_CONTRIB_ACTIVATION_OPS()\n#undef UNARY_ACTIVATION_OP_NAME\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"contrib_ops/cpu/aten_ops/aten_op.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n ATen, kPytorchAtenDomain, 1, kCudaExecutionProvider,\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::AllTensorAndSequenceTensorTypes()),\n onnxruntime::contrib::ATen);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"contrib_ops/cpu/aten_ops/aten_op.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n ATen, kPytorchAtenDomain, 1, kRocmExecutionProvider,\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::AllTensorAndSequenceTensorTypes()),\n onnxruntime::contrib::ATen);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nvoid LaunchAddBiasTranspose(\n cudaStream_t stream, const int num_matrices, const int format, const int max_threads_per_block, const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size, const T* input, const T* biases, T* output, bool enable_half4, const int v_head_size, T* qkv_add_bias = nullptr, int total_matrix_count = -1, bool do_rotary = false, int original_past_sequence_length = 0);\n\n\n\n\n\n\n\ntemplate \nvoid LaunchAddBiasTransposeTrt(\n cudaStream_t stream, const int max_threads_per_block, const int batch_size, const int sequence_length, const int num_heads, const int head_size, const T* biases, const T* query, const T* key, const T* value, T* output, bool is_cross_attention, int kv_sequence_length = -1);\n\n\n\n\ntemplate \nvoid LaunchAddBias(\n cudaStream_t stream, const int max_threads_per_block, const int batch_size, const int sequence_length, const int kv_sequence_length, const int num_heads, const int head_size, const int v_head_size, const T* biases, const T* query, const T* key, const T* value, T* q, T* k, T* v);\n} \n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nvoid LaunchAddBiasTranspose(\n hipStream_t stream, const int num_matrices, const int format, const int max_threads_per_block, const int batch_size, const int sequence_length, const int num_heads, const int qk_head_size, const T* input, const T* biases, T* output, bool enable_half4, const int v_head_size, T* qkv_add_bias = nullptr, int total_matrix_count = -1, bool do_rotary = false, int original_past_sequence_length = 0);\n\n\n\n\n\n\n\ntemplate \nvoid LaunchAddBiasTransposeTrt(\n hipStream_t stream, const int max_threads_per_block, const int batch_size, const int sequence_length, const int num_heads, const int head_size, const T* biases, const T* query, const T* key, const T* value, T* output, bool is_cross_attention, int kv_sequence_length = -1);\n\n\n\n\ntemplate \nvoid LaunchAddBias(\n hipStream_t stream, const int max_threads_per_block, const int batch_size, const int sequence_length, const int kv_sequence_length, const int num_heads, const int head_size, const int v_head_size, const T* biases, const T* query, const T* key, const T* value, T* q, T* k, T* v);\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// Build token indice for non-padding tokens and padding tokens.\nvoid LaunchGetTokenOffset(int* token_count_buffer,\n int* token_offset,\n int* cumulated_token_count,\n const int* sequence_token_count,\n const int batch_size,\n const int sequence_length,\n cudaStream_t stream);\n\n// Remove paddings from input.\ntemplate \nvoid LaunchRemovePadding(\n T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,\n cudaStream_t stream);\n\n// Rebuild paddings to restore output shape.\ntemplate \nvoid LaunchRestorePadding(\n T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,\n const int batch_size, const int sequence_length,\n cudaStream_t stream);\n\n// Padding offset for TensorRT fused attention kernel\nvoid LaunchTrtSequenceOffset(int* trt_mha_padding_offset,\n const int* mask_index,\n const int batch_size,\n cudaStream_t stream);\n\nvoid LaunchTrtSequenceOffset(int* trt_mha_padding_offset,\n const int* mask_index,\n const int batch_size,\n const int sequence_length,\n cudaStream_t stream);\n\nvoid LaunchTrtSequenceOffset2d(int* trt_mha_padding_offset,\n const int* mask_index,\n const int batch_size,\n const int sequence_length,\n cudaStream_t stream);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// Build token indice for non-padding tokens and padding tokens.\nvoid LaunchGetTokenOffset(int* token_count_buffer,\n int* token_offset,\n int* cumulated_token_count,\n const int* sequence_token_count,\n const int batch_size,\n const int sequence_length,\n hipStream_t stream);\n\n// Remove paddings from input.\ntemplate \nvoid LaunchRemovePadding(\n T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,\n hipStream_t stream);\n\n// Rebuild paddings to restore output shape.\ntemplate \nvoid LaunchRestorePadding(\n T* output, const T* input, const int* token_offset, const int token_count, const int hidden_size,\n const int batch_size, const int sequence_length,\n hipStream_t stream);\n\n// Padding offset for TensorRT fused attention kernel\nvoid LaunchTrtSequenceOffset(int* trt_mha_padding_offset,\n const int* mask_index,\n const int batch_size,\n hipStream_t stream);\n\nvoid LaunchTrtSequenceOffset(int* trt_mha_padding_offset,\n const int* mask_index,\n const int batch_size,\n const int sequence_length,\n hipStream_t stream);\n\nvoid LaunchTrtSequenceOffset2d(int* trt_mha_padding_offset,\n const int* mask_index,\n const int batch_size,\n const int sequence_length,\n hipStream_t stream);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
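To make the padding utilities above concrete: LaunchRemovePadding packs the rows of a (batch_size, sequence_length, hidden_size) tensor that correspond to real (non-padding) tokens into a dense (token_count, hidden_size) buffer, and LaunchRestorePadding scatters them back. The host-side sketch below is a hypothetical illustration of that packing only; the actual token_offset layout produced by LaunchGetTokenOffset is not reproduced here.

// Conceptual host-side packing (hypothetical, not the CUDA kernel above).
#include <cstdint>
#include <vector>

std::vector<float> RemovePaddingHost(const std::vector<float>& input,   // B*S*H values
                                     const std::vector<int32_t>& mask,  // B*S, 1 = real token
                                     int B, int S, int H) {
  std::vector<float> packed;
  packed.reserve(input.size());
  for (int b = 0; b < B; ++b) {
    for (int s = 0; s < S; ++s) {
      if (mask[b * S + s] == 1) {
        const float* row = &input[(static_cast<size_t>(b) * S + s) * H];
        packed.insert(packed.end(), row, row + H);  // keep only non-padding rows
      }
    }
  }
  return packed;  // shape: (token_count, H)
}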
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass DecoderAttention final : public CudaKernel {\n public:\n DecoderAttention(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int num_heads_;\n float mask_filter_value_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass DecoderAttention final : public RocmKernel {\n public:\n DecoderAttention(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int num_heads_;\n float mask_filter_value_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"contrib_ops/cpu/bert/embed_layer_norm_helper.h\"\n#include \"embed_layer_norm.h\"\n#include \"embed_layer_norm_impl.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n#define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( EmbedLayerNormalization, kMSDomain, 1, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), EmbedLayerNorm);\nREGISTER_KERNEL_TYPED(float)\nREGISTER_KERNEL_TYPED(MLFloat16)\nusing namespace ONNX_NAMESPACE;\ntemplate \nEmbedLayerNorm::EmbedLayerNorm(const OpKernelInfo& op_kernel_info) : CudaKernel(op_kernel_info) {\n ORT_ENFORCE(op_kernel_info.GetAttr(\"epsilon\", &epsilon_).IsOK());\n ORT_ENFORCE(epsilon_ >= 0);\n}\ntemplate \nStatus EmbedLayerNorm::ComputeInternal(OpKernelContext* context) const {\n ORT_RETURN_IF_ERROR(embed_layer_norm::CheckInputs(context));\n const Tensor* input_ids = context->Input(0);\n const Tensor* segment_ids = context->Input(1); \n const Tensor* word_embedding = context->Input(2);\n const Tensor* position_embedding = context->Input(3);\n const Tensor* segment_embedding = context->Input(4); \n const Tensor* gamma = context->Input(5);\n const Tensor* beta = context->Input(6);\n const Tensor* mask = context->Input(7); \n const Tensor* position_ids = context->Input(8); \n const auto& input_dims = input_ids->Shape().GetDims();\n int64_t hidden_size = word_embedding->Shape()[1];\n TensorShape output_shape({input_dims[0], input_dims[1], hidden_size});\n Tensor* output = context->Output(0, output_shape);\n TensorShape mask_index_shape({input_dims[0]});\n Tensor* mask_index = context->Output(1, mask_index_shape);\n Tensor* embedding_sum = context->Output(2, output_shape);\n int batch_size = static_cast(input_dims[0]);\n int sequence_length = static_cast(input_dims[1]);\n size_t element_size = sizeof(T);\n const bool broadcast_position_ids = (nullptr != position_ids && position_ids->Shape()[0] == 1);\n return LaunchEmbedLayerNormKernel(\n Stream(context), output->MutableData(), nullptr == mask_index ? nullptr : mask_index->MutableData(), input_ids->Data(), nullptr == segment_ids ? nullptr : segment_ids->Data(), nullptr == mask ? 
nullptr : mask->Data(), gamma->Data(), beta->Data(), word_embedding->Data(), position_embedding->Data(), nullptr == segment_embedding ? nullptr : segment_embedding->Data(), epsilon_, static_cast(hidden_size), batch_size, sequence_length, element_size, embedding_sum == nullptr ? nullptr : embedding_sum->MutableData(), position_ids == nullptr ? nullptr : position_ids->Data(), broadcast_position_ids);\n}\n} \n} \n} \n\n###", "hip": " \n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"contrib_ops/cpu/bert/embed_layer_norm_helper.h\"\n#include \"embed_layer_norm.h\"\n#include \"embed_layer_norm_impl.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n#define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( EmbedLayerNormalization, kMSDomain, 1, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), EmbedLayerNorm);\nREGISTER_KERNEL_TYPED(float)\nREGISTER_KERNEL_TYPED(MLFloat16)\nusing namespace ONNX_NAMESPACE;\ntemplate \nEmbedLayerNorm::EmbedLayerNorm(const OpKernelInfo& op_kernel_info) : RocmKernel(op_kernel_info) {\n ORT_ENFORCE(op_kernel_info.GetAttr(\"epsilon\", &epsilon_).IsOK());\n ORT_ENFORCE(epsilon_ >= 0);\n}\ntemplate \nStatus EmbedLayerNorm::ComputeInternal(OpKernelContext* context) const {\n ORT_RETURN_IF_ERROR(embed_layer_norm::CheckInputs(context));\n const Tensor* input_ids = context->Input(0);\n const Tensor* segment_ids = context->Input(1); \n const Tensor* word_embedding = context->Input(2);\n const Tensor* position_embedding = context->Input(3);\n const Tensor* segment_embedding = context->Input(4); \n const Tensor* gamma = context->Input(5);\n const Tensor* beta = context->Input(6);\n const Tensor* mask = context->Input(7); \n const Tensor* position_ids = context->Input(8); \n const auto& input_dims = input_ids->Shape().GetDims();\n int64_t hidden_size = word_embedding->Shape()[1];\n TensorShape output_shape({input_dims[0], input_dims[1], hidden_size});\n Tensor* output = context->Output(0, output_shape);\n TensorShape mask_index_shape({input_dims[0]});\n Tensor* mask_index = context->Output(1, mask_index_shape);\n Tensor* embedding_sum = context->Output(2, output_shape);\n int batch_size = static_cast(input_dims[0]);\n int sequence_length = static_cast(input_dims[1]);\n size_t element_size = sizeof(T);\n const bool broadcast_position_ids = (nullptr != position_ids && position_ids->Shape()[0] == 1);\n return LaunchEmbedLayerNormKernel(\n Stream(context), output->MutableData(), nullptr == mask_index ? nullptr : mask_index->MutableData(), input_ids->Data(), nullptr == segment_ids ? nullptr : segment_ids->Data(), nullptr == mask ? nullptr : mask->Data(), gamma->Data(), beta->Data(), word_embedding->Data(), position_embedding->Data(), nullptr == segment_embedding ? nullptr : segment_embedding->Data(), epsilon_, static_cast(hidden_size), batch_size, sequence_length, element_size, embedding_sum == nullptr ? nullptr : embedding_sum->MutableData(), position_ids == nullptr ? nullptr : position_ids->Data(), broadcast_position_ids);\n}\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
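A worked shape example for the EmbedLayerNorm compute above, using illustrative sizes (30522 is just a typical BERT vocabulary size, not something fixed by the kernel):

// input_ids      : (2, 128)        -> batch_size = 2, sequence_length = 128
// word_embedding : (30522, 768)    -> hidden_size = word_embedding.Shape()[1] = 768
// output         : (2, 128, 768)   -> {input_dims[0], input_dims[1], hidden_size}
// mask_index     : (2)             -> {input_dims[0]}
// embedding_sum  : (2, 128, 768)   -> optional third output, same shape as output
// element_size   : sizeof(T), i.e. 4 for float and 2 for MLFloat16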
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Range final : public CudaKernel {\n public:\n explicit Range(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Range final : public RocmKernel {\n public:\n explicit Range(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass EmbedLayerNorm final : public CudaKernel {\n public:\n EmbedLayerNorm(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n float epsilon_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass EmbedLayerNorm final : public RocmKernel {\n public:\n EmbedLayerNorm(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n float epsilon_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nStatus LaunchEmbedLayerNormKernel(cudaStream_t stream,\n void* output, // output tensor\n void* mask_index, // output mask index\n const int* input_ids, // input word IDs\n const int* segment_ids, // input segment IDs\n const int* input_mask, // input mask\n const void* gamma, // weight for layer normalization\n const void* beta, // bias for layer normalization\n const void* word_embedding, // weights for word embeddings\n const void* position_embedding, // weights for position embeddings\n const void* segment_embedding, // weights for segment (like sentence) embeddings\n float epsilon, // epsilon for layer normalization\n const int hidden_size, // hidden size (that is head_size * num_heads)\n int batch_size, // batch size\n int sequence_length, // sequence length\n const size_t element_size, // size of output element: 2 for half, 4 for float.\n void* embedding_sum, // Optional output of sum of embeddings\n const int* position_ids, // Optional input of position ids\n const bool broadcast_position_ids); // Whether to broadcast position ids\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nStatus LaunchEmbedLayerNormKernel(hipStream_t stream,\n void* output, // output tensor\n void* mask_index, // output mask index\n const int* input_ids, // input word IDs\n const int* segment_ids, // input segment IDs\n const int* input_mask, // input mask\n const void* gamma, // weight for layer normalization\n const void* beta, // bias for layer normalization\n const void* word_embedding, // weights for word embeddings\n const void* position_embedding, // weights for position embeddings\n const void* segment_embedding, // weights for segment (like sentence) embeddings\n float epsilon, // epsilon for layer normalization\n const int hidden_size, // hidden size (that is head_size * num_heads)\n int batch_size, // batch size\n int sequence_length, // sequence length\n const size_t element_size, // size of output element: 2 for half, 4 for float.\n void* embedding_sum, // Optional output of sum of embeddings\n const int* position_ids, // Optional input of position ids\n const bool broadcast_position_ids); // Whether to broadcast position ids\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"contrib_ops/cpu/bert/longformer_attention_base.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass LongformerAttention final : public CudaKernel, public LongformerAttentionBase {\n public:\n LongformerAttention(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool use_compact_memory_;\n bool use_half4_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"contrib_ops/cpu/bert/longformer_attention_base.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass LongformerAttention final : public RocmKernel, public LongformerAttentionBase {\n public:\n LongformerAttention(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool use_compact_memory_;\n bool use_half4_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nsize_t GetPinnedBufferSize(\n size_t batch_size);\n\nsize_t GetLongformerAttentionWorkspaceSize(\n size_t element_size,\n size_t batch_size,\n size_t num_heads,\n size_t head_size,\n size_t sequence_length,\n\n size_t max_num_global,\n size_t window,\n bool disable_compact_memory);\n\nStatus LaunchLongformerAttentionKernel(\n const cudaDeviceProp& device_prop, // Device Properties\n cublasHandle_t cublas, // Cublas handle\n cudaStream_t stream, // CUDA stream\n const void* input, // Input tensor\n const void* bias, // Bias tensor\n const void* attention_mask, // Attention mask with shape (B, S)\n const void* global_input, // Global attention input, or nullptr when max_num_global == 0.\n const void* global_bias, // Global bias tensor\n const int* global_attention, // Global attention flags with shape (B, S)\n const int* global_index, // Global index\n const int* batch_global_num, // Number of global tokens per batch. It is in device memory.\n void* pinned_buffer, // Pinned memory: copy of batch_global_num, and a buffer to copy to scratch2.\n void* workspace, // Temporary buffer\n void* output, // Output tensor\n int batch_size, // Batch size (B)\n int sequence_length, // Sequence length (S)\n int num_heads, // Number of attention heads (N)\n int head_size, // Hidden layer size per head (H)\n int window, // One sided attention window (W)\n int max_num_global, // Maximum number of global tokens (G)\n const size_t element_size, // Element size of input tensor,\n bool disable_compact_memory, // Disable compact memory kernel\n bool use_merged_qkv_weights,\n bool use_half4);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nsize_t GetPinnedBufferSize(\n size_t batch_size);\n\nsize_t GetLongformerAttentionWorkspaceSize(\n size_t element_size,\n size_t batch_size,\n size_t num_heads,\n size_t head_size,\n size_t sequence_length,\n\n size_t max_num_global,\n size_t window,\n bool disable_compact_memory);\n\nStatus LaunchLongformerAttentionKernel(\n const hipDeviceProp_t& device_prop, // Device Properties\n rocblas_handle rocblas, // Rocblas handle\n hipStream_t stream, // ROCM stream\n const void* input, // Input tensor\n const void* bias, // Bias tensor\n const void* attention_mask, // Attention mask with shape (B, S)\n const void* global_input, // Global attention input, or nullptr when max_num_global == 0.\n const void* global_bias, // Global bias tensor\n const int* global_attention, // Global attention flags with shape (B, S)\n const int* global_index, // Global index\n const int* batch_global_num, // Number of global tokens per batch. 
It is in device memory.\n void* pinned_buffer, // Pinned memory: copy of batch_global_num, and a buffer to copy to scratch2.\n void* workspace, // Temporary buffer\n void* output, // Output tensor\n int batch_size, // Batch size (B)\n int sequence_length, // Sequence length (S)\n int num_heads, // Number of attention heads (N)\n int head_size, // Hidden layer size per head (H)\n int window, // One sided attention window (W)\n int max_num_global, // Maximum number of global tokens (G)\n const size_t element_size, // Element size of input tensor,\n bool disable_compact_memory, // Disable compact memory kernel\n bool use_merged_qkv_weights,\n bool use_half4);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n/*\nCopyright (c) NVIDIA Corporation and Microsoft Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n#pragma once\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// Launch the softmax kernels that does not use compact memory.\nStatus LaunchLongformerSoftmaxSimpleKernel(\n cudaStream_t stream,\n cublasHandle_t cublas,\n void* workspace, // softmax space\n const void* q, // transposed Q with shape (B, N, S, H)\n const void* k, // transposed K with shape (B, N, S, H)\n const void* v, // transposed V with shape (B, N, S, H)\n const void* attention_mask, // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 or torch.finfo(dtype).min masked.\n const void* global_q, // Q for global tokens with shape (B, N, S, H)\n const void* global_k, // K for global tokens with shape (B, N, S, H)\n const void* global_v, // V for global tokens with shape (B, N, S, H)\n const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.\n const int* global_index, // Global index with shape (B, S)\n const int* batch_global_num, // Number of global tokens per batch with shape (B, 1)\n void* pinned_buffer, // Pinned memory in CPU. 
Number of global tokens per batch with shape (B, 1)\n void* output, // output with shape (B, N, S, H)\n float scaler, // scalar\n int batch_size, // batch size\n int sequence_length, // sequence length\n int num_heads, // number of heads\n int head_size, // hidden size per head\n int attention_window, // one sided windows size\n size_t element_size);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " /*\nCopyright (c) NVIDIA Corporation and Microsoft Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n#pragma once\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// Launch the softmax kernels that does not use compact memory.\nStatus LaunchLongformerSoftmaxSimpleKernel(\n hipStream_t stream,\n rocblas_handle rocblas,\n void* workspace, // softmax space\n const void* q, // transposed Q with shape (B, N, S, H)\n const void* k, // transposed K with shape (B, N, S, H)\n const void* v, // transposed V with shape (B, N, S, H)\n const void* attention_mask, // attention mask with shape (B, S), with value 0.0 not masked, and -10000.0 or torch.finfo(dtype).min masked.\n const void* global_q, // Q for global tokens with shape (B, N, S, H)\n const void* global_k, // K for global tokens with shape (B, N, S, H)\n const void* global_v, // V for global tokens with shape (B, N, S, H)\n const int* global_attention, // global attention flags with shape (B, S), with value 0 for local and 1 for global.\n const int* global_index, // Global index with shape (B, S)\n const int* batch_global_num, // Number of global tokens per batch with shape (B, 1)\n void* pinned_buffer, // Pinned memory in CPU. 
Number of global tokens per batch with shape (B, 1)\n void* output, // output with shape (B, N, S, H)\n float scaler, // scalar\n int batch_size, // batch size\n int sequence_length, // sequence length\n int num_heads, // number of heads\n int head_size, // hidden size per head\n int attention_window, // one sided windows size\n size_t element_size);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n/*\nCopyright (c) NVIDIA Corporation and Microsoft Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n#include \n#include \n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"longformer_global_impl.h\"\n\nusing namespace onnxruntime::cuda;\nusing namespace cub;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nsize_t GetGlobalScratchSize(int sequence_length) {\n // Global Index scratch layout:\n // [sequence_index: int S][tmp_storage: int 1024x1]\n return sizeof(int) * (sequence_length + 1024);\n}\n\n__global__ void InitSequenceIndexKernel(int* sequence_index, int sequence_length) {\n for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < sequence_length; i += blockDim.x) {\n sequence_index[i] = i;\n }\n}\n\nStatus BuildGlobalIndex(\n const cudaDeviceProp& device_prop,\n cudaStream_t stream,\n const int* global_attention,\n int batch_size,\n int sequence_length,\n int* global_index,\n int* batch_global_num,\n void* scratch,\n size_t scratch_size) {\n int* sequence_index = (int*)scratch;\n int* tmp_storage = sequence_index + sequence_length;\n\n const int threads = device_prop.maxThreadsPerBlock;\n int blocks = CeilDiv(sequence_length, threads);\n InitSequenceIndexKernel<<>>(sequence_index, sequence_length);\n\n // Determine temporary device storage size.\n // For int* inputs/outputs, it need 767 bytes. We reserved 1024*4 bytes, which shall be enough.\n size_t temp_storage_bytes = 0;\n CUDA_RETURN_IF_ERROR(cub::DevicePartition::Flagged(\n NULL, temp_storage_bytes, sequence_index,\n global_attention, global_index, batch_global_num, sequence_length, stream));\n if (temp_storage_bytes + sizeof(int) * sequence_length > scratch_size) {\n ORT_THROW(\"LongformerAttention scratch space is not large enough. 
Temp storage bytes are\", temp_storage_bytes);\n }\n\n // Find the global attention indices and number of global attention tokens\n for (int i = 0; i < batch_size; ++i) {\n CUDA_RETURN_IF_ERROR(cub::DevicePartition::Flagged(\n reinterpret_cast(tmp_storage), temp_storage_bytes, sequence_index,\n global_attention + i * sequence_length, global_index + i * sequence_length,\n batch_global_num + i, sequence_length, stream));\n }\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n/*\nCopyright (c) NVIDIA Corporation and Microsoft Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n#include \n#include \n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"longformer_global_impl.h\"\n\nusing namespace onnxruntime::rocm;\nusing namespace hipcub;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nsize_t GetGlobalScratchSize(int sequence_length) {\n // Global Index scratch layout:\n // [sequence_index: int S][tmp_storage: int 1024x1]\n return sizeof(int) * (sequence_length + 1024);\n}\n\n__global__ void InitSequenceIndexKernel(int* sequence_index, int sequence_length) {\n for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < sequence_length; i += blockDim.x) {\n sequence_index[i] = i;\n }\n}\n\nStatus BuildGlobalIndex(\n const hipDeviceProp_t& device_prop,\n hipStream_t stream,\n const int* global_attention,\n int batch_size,\n int sequence_length,\n int* global_index,\n int* batch_global_num,\n void* scratch,\n size_t scratch_size) {\n int* sequence_index = (int*)scratch;\n int* tmp_storage = sequence_index + sequence_length;\n\n const int threads = device_prop.maxThreadsPerBlock;\n int blocks = CeilDiv(sequence_length, threads);\n InitSequenceIndexKernel<<>>(sequence_index, sequence_length);\n\n // Determine temporary device storage size.\n // For int* inputs/outputs, it need 767 bytes. We reserved 1024*4 bytes, which shall be enough.\n size_t temp_storage_bytes = 0;\n HIP_RETURN_IF_ERROR(hipcub::DevicePartition::Flagged(\n NULL, temp_storage_bytes, sequence_index,\n global_attention, global_index, batch_global_num, sequence_length, stream));\n if (temp_storage_bytes + sizeof(int) * sequence_length > scratch_size) {\n ORT_THROW(\"LongformerAttention scratch space is not large enough. Temp storage bytes are\", temp_storage_bytes);\n }\n\n // Find the global attention indices and number of global attention tokens\n for (int i = 0; i < batch_size; ++i) {\n HIP_RETURN_IF_ERROR(hipcub::DevicePartition::Flagged(\n reinterpret_cast(tmp_storage), temp_storage_bytes, sequence_index,\n global_attention + i * sequence_length, global_index + i * sequence_length,\n batch_global_num + i, sequence_length, stream));\n }\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
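For intuition about the cub/hipcub DevicePartition::Flagged calls in BuildGlobalIndex above: per batch row, indices whose global_attention flag is non-zero are moved to the front of global_index and their count is written to batch_global_num. The host-side analogue below is hypothetical and uses std::stable_partition, whose handling of the rejected tail differs from CUB's DevicePartition (CUB writes it in reverse order), but the selected prefix and the count come out the same.

#include <algorithm>
#include <numeric>
#include <vector>

// Returns batch_global_num for one row; fills global_index with the partitioned indices.
int BuildGlobalIndexHost(const std::vector<int>& global_attention,  // length S, 0/1 flags
                         std::vector<int>& global_index) {          // out, length S
  const int S = static_cast<int>(global_attention.size());
  global_index.resize(S);
  std::iota(global_index.begin(), global_index.end(), 0);  // sequence_index = 0..S-1
  auto mid = std::stable_partition(global_index.begin(), global_index.end(),
                                   [&](int i) { return global_attention[i] != 0; });
  return static_cast<int>(mid - global_index.begin());
}
// Example: global_attention = {1,0,1,1,0} -> global_index starts with {0,2,3}, count = 3.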
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// Size of global Index scratch in bytes.\nsize_t GetGlobalScratchSize(int sequence_length);\n\n// Find the global attention indices and number of global attention tokens\nStatus BuildGlobalIndex(\n const cudaDeviceProp& device_prop,\n cudaStream_t stream,\n const int* global_attention,\n int batch_size,\n int sequence_length,\n int* global_index,\n int* batch_global_num,\n void* scratch,\n size_t scratch_size);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// Size of global Index scratch in bytes.\nsize_t GetGlobalScratchSize(int sequence_length);\n\n// Find the global attention indices and number of global attention tokens\nStatus BuildGlobalIndex(\n const hipDeviceProp_t& device_prop,\n hipStream_t stream,\n const int* global_attention,\n int batch_size,\n int sequence_length,\n int* global_index,\n int* batch_global_num,\n void* scratch,\n size_t scratch_size);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"ngram_repeat_block.h\"\n#include \"ngram_repeat_block_impl.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n NGramRepeatBlock,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"Tid\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()),\n NGramRepeatBlock);\n\nusing namespace ONNX_NAMESPACE;\n\nNGramRepeatBlock::NGramRepeatBlock(const OpKernelInfo& info) : CudaKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"ngram_size\", &ngram_size_).IsOK());\n ORT_ENFORCE(ngram_size_ > 0);\n}\n\nStatus NGramRepeatBlock::ComputeInternal(OpKernelContext* context) const {\n const Tensor* input_ids = context->Input(0);\n const Tensor* scores = context->Input(1);\n Tensor* output = context->Output(0, scores->Shape());\n\n const auto* scores_source = static_cast(scores->DataRaw());\n auto* scores_target = static_cast(output->MutableDataRaw());\n if (scores_source != scores_target) {\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(scores_target, scores_source, scores->Shape().Size() * sizeof(float), cudaMemcpyDeviceToDevice, Stream(context)));\n }\n\n const auto& input_ids_dims = input_ids->Shape().GetDims();\n const auto& scores_dims = scores->Shape().GetDims();\n ORT_ENFORCE(input_ids_dims.size() == 2);\n ORT_ENFORCE(scores_dims.size() == 2);\n int64_t batch_size = input_ids_dims[0];\n int64_t cur_len = input_ids_dims[1];\n ORT_ENFORCE(scores_dims[0] == batch_size);\n int64_t vocab_size = scores_dims[1];\n\n if (cur_len + 1 < ngram_size_) {\n return Status::OK();\n }\n\n const auto* input_ids_data = static_cast(input_ids->DataRaw(input_ids->DataType()));\n\n NGramRepeatBlockImpl(\n Stream(context),\n input_ids_data,\n scores_target,\n gsl::narrow_cast(batch_size),\n gsl::narrow_cast(cur_len - 1),\n gsl::narrow_cast(cur_len),\n gsl::narrow_cast(vocab_size),\n gsl::narrow_cast(1),\n gsl::narrow_cast(ngram_size_));\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace contrib\n} // namespace 
onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"ngram_repeat_block.h\"\n#include \"ngram_repeat_block_impl.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n NGramRepeatBlock,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"Tid\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()),\n NGramRepeatBlock);\n\nusing namespace ONNX_NAMESPACE;\n\nNGramRepeatBlock::NGramRepeatBlock(const OpKernelInfo& info) : RocmKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"ngram_size\", &ngram_size_).IsOK());\n ORT_ENFORCE(ngram_size_ > 0);\n}\n\nStatus NGramRepeatBlock::ComputeInternal(OpKernelContext* context) const {\n const Tensor* input_ids = context->Input(0);\n const Tensor* scores = context->Input(1);\n Tensor* output = context->Output(0, scores->Shape());\n\n const auto* scores_source = static_cast(scores->DataRaw());\n auto* scores_target = static_cast(output->MutableDataRaw());\n if (scores_source != scores_target) {\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(scores_target, scores_source, scores->Shape().Size() * sizeof(float), hipMemcpyDeviceToDevice, Stream(context)));\n }\n\n const auto& input_ids_dims = input_ids->Shape().GetDims();\n const auto& scores_dims = scores->Shape().GetDims();\n ORT_ENFORCE(input_ids_dims.size() == 2);\n ORT_ENFORCE(scores_dims.size() == 2);\n int64_t batch_size = input_ids_dims[0];\n int64_t cur_len = input_ids_dims[1];\n ORT_ENFORCE(scores_dims[0] == batch_size);\n int64_t vocab_size = scores_dims[1];\n\n if (cur_len + 1 < ngram_size_) {\n return Status::OK();\n }\n\n const auto* input_ids_data = static_cast(input_ids->DataRaw(input_ids->DataType()));\n\n NGramRepeatBlockImpl(\n Stream(context),\n input_ids_data,\n scores_target,\n gsl::narrow_cast(batch_size),\n gsl::narrow_cast(cur_len - 1),\n gsl::narrow_cast(cur_len),\n gsl::narrow_cast(vocab_size),\n gsl::narrow_cast(1),\n gsl::narrow_cast(ngram_size_));\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\nclass NGramRepeatBlock final : public CudaKernel {\n public:\n NGramRepeatBlock(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t ngram_size_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
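The NGramRepeatBlock operator above and the banRepeatedTokens kernel in the next entry implement the usual no-repeat-ngram rule: if the trailing (ngram_size - 1) tokens also occur earlier in the sequence, the token that followed that earlier occurrence is banned for the next step. A hypothetical host-side reference of that rule (not the kernel itself):

#include <cmath>
#include <cstdint>
#include <vector>

void BanRepeatedNgramsHost(const std::vector<int64_t>& tokens,  // tokens generated so far
                           std::vector<float>& lprobs,          // size = vocab_size
                           int ngram_size) {
  const int cur_len = static_cast<int>(tokens.size());
  if (cur_len + 1 < ngram_size) return;  // same early-out as NGramRepeatBlock::ComputeInternal
  const int prefix = ngram_size - 1;
  for (int start = 0; start + prefix < cur_len; ++start) {  // earlier (n-1)-grams with a successor
    bool match = true;
    for (int k = 0; k < prefix; ++k) {
      if (tokens[start + k] != tokens[cur_len - prefix + k]) { match = false; break; }
    }
    if (match) lprobs[static_cast<size_t>(tokens[start + prefix])] = -INFINITY;
  }
}
// Example: tokens = {5, 3, 5}, ngram_size = 2 -> the final unigram {5} also occurs at
// position 0, so the token that followed it there (3) is banned for the next step.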
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\nclass NGramRepeatBlock final : public RocmKernel {\n public:\n NGramRepeatBlock(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t ngram_size_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n/*\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT License.\n*/\n\n/*\nKernel implementation for blocking repeated n-grams.\n*/\n\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"contrib_ops/cuda/bert/ngram_repeat_block_impl.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\n// Ban repeated ngrams of length = 'no_repeat_ngram_size'\n__global__ void banRepeatedTokens(const int64_t* __restrict__ tokens,\n float* __restrict__ lprobs,\n int max_predict_len, int vocab_size,\n int no_repeat_ngram_size) {\n auto row = blockIdx.x;\n auto col = threadIdx.x;\n auto start = row * (max_predict_len) + col;\n // Each thread compares ngram starting from\n // thread index with final ngram starting from\n // step - no_repeat_ngram_size +2\n auto check_start_pos = blockDim.x;\n auto lprob_start = row * vocab_size;\n bool is_banned = true;\n extern __shared__ int64_t tokens_shm[];\n tokens_shm[col] = tokens[start];\n if (col == blockDim.x - 1) {\n for (int i = 1; i < no_repeat_ngram_size; i++) {\n if (col + i < max_predict_len) {\n tokens_shm[col + i] = tokens[start + i];\n }\n }\n }\n __syncthreads();\n\n for (int k = 0; k < no_repeat_ngram_size - 1; k++) {\n if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) {\n is_banned = false;\n }\n }\n if (is_banned == true) {\n auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1];\n lprobs[lprob_start + token_to_be_banned] = -INFINITY;\n }\n}\n\n// Allocate blocks and threads based on\n// batch size and sequence length and launch\n// kernel\nvoid NGramRepeatBlockImpl(\n cudaStream_t stream,\n const int64_t* tokens_ptr,\n float* scores_ptr,\n int bsz,\n int step,\n int max_predict_len,\n int vocab_size,\n int beam_size,\n int no_repeat_ngram_size) {\n int threads = step - no_repeat_ngram_size + 2;\n if (threads <= 0) return;\n int blocks = bsz * beam_size;\n int shared_mem_size = (step + 1) * sizeof(int64_t);\n\n // Launching N blocks where N is number of samples in a batch (beams*bsz)\n // Launching T threads where T is number of previous ngrams in a sample\n // Allocating shared mem per block for fastser access of input tokens since\n // each token will be accessed N times to compare with current Ngram where\n // N is Ngram size.\n banRepeatedTokens<<>>(\n tokens_ptr, scores_ptr, max_predict_len, vocab_size, no_repeat_ngram_size);\n}\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n/*\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT License.\n*/\n\n/*\nKernel implementation for blocking repeated n-grams.\n*/\n\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"contrib_ops/rocm/bert/ngram_repeat_block_impl.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\n// Ban repeated ngrams of length = 'no_repeat_ngram_size'\n__global__ 
void banRepeatedTokens(const int64_t* __restrict__ tokens,\n float* __restrict__ lprobs,\n int max_predict_len, int vocab_size,\n int no_repeat_ngram_size) {\n auto row = blockIdx.x;\n auto col = threadIdx.x;\n auto start = row * (max_predict_len) + col;\n // Each thread compares ngram starting from\n // thread index with final ngram starting from\n // step - no_repeat_ngram_size +2\n auto check_start_pos = blockDim.x;\n auto lprob_start = row * vocab_size;\n bool is_banned = true;\n extern __shared__ int64_t tokens_shm[];\n tokens_shm[col] = tokens[start];\n if (col == blockDim.x - 1) {\n for (int i = 1; i < no_repeat_ngram_size; i++) {\n if (col + i < max_predict_len) {\n tokens_shm[col + i] = tokens[start + i];\n }\n }\n }\n __syncthreads();\n\n for (int k = 0; k < no_repeat_ngram_size - 1; k++) {\n if (tokens_shm[col + k] != tokens_shm[check_start_pos + k]) {\n is_banned = false;\n }\n }\n if (is_banned == true) {\n auto token_to_be_banned = tokens_shm[col + no_repeat_ngram_size - 1];\n lprobs[lprob_start + token_to_be_banned] = -INFINITY;\n }\n}\n\n// Allocate blocks and threads based on\n// batch size and sequence length and launch\n// kernel\nvoid NGramRepeatBlockImpl(\n hipStream_t stream,\n const int64_t* tokens_ptr,\n float* scores_ptr,\n int bsz,\n int step,\n int max_predict_len,\n int vocab_size,\n int beam_size,\n int no_repeat_ngram_size) {\n int threads = step - no_repeat_ngram_size + 2;\n if (threads <= 0) return;\n int blocks = bsz * beam_size;\n int shared_mem_size = (step + 1) * sizeof(int64_t);\n\n // Launching N blocks where N is number of samples in a batch (beams*bsz)\n // Launching T threads where T is number of previous ngrams in a sample\n // Allocating shared mem per block for fastser access of input tokens since\n // each token will be accessed N times to compare with current Ngram where\n // N is Ngram size.\n banRepeatedTokens<<>>(\n tokens_ptr, scores_ptr, max_predict_len, vocab_size, no_repeat_ngram_size);\n}\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \n#include \n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"range_impl.h\"\n\nusing namespace onnxruntime::cuda;\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void RangeKernel(const T start, const T delta, const int count, T* output) {\n int index = blockIdx.x * blockDim.x + threadIdx.x;\n if (index < count) {\n output[index] = start + delta * index;\n }\n}\n\ntemplate \nStatus RangeImpl(cudaStream_t stream, const T start, const T delta, const int count, T* output) {\n constexpr int block_size = 256;\n int grid_size = (count + block_size - 1) / block_size;\n RangeKernel<<>>(start, delta, count, output);\n return CUDA_CALL(cudaGetLastError());\n}\n\n#define SPECIALIZED_IMPL(T) \\\n template Status RangeImpl(cudaStream_t stream, const T start, const T delta, const int count, T* output);\n\nSPECIALIZED_IMPL(int16_t)\nSPECIALIZED_IMPL(int32_t)\nSPECIALIZED_IMPL(int64_t)\nSPECIALIZED_IMPL(float)\nSPECIALIZED_IMPL(double)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. 
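A worked launch configuration for the RangeImpl above: with block_size = 256 and count = 1000, grid_size = (1000 + 255) / 256 = 4 blocks (1024 threads), and the index < count guard in RangeKernel masks off the 24 surplus threads. A hedged usage sketch follows; it assumes it is compiled where RangeImpl<float> and the onnxruntime CUDA helpers are visible, and keeps error handling minimal:

Status FillRangeExample(cudaStream_t stream) {
  float* d_out = nullptr;
  CUDA_RETURN_IF_ERROR(cudaMalloc(&d_out, 1000 * sizeof(float)));
  // start = 0.0, delta = 0.5, count = 1000 -> d_out[i] = 0.5f * i
  ORT_RETURN_IF_ERROR(RangeImpl<float>(stream, 0.0f, 0.5f, 1000, d_out));
  CUDA_RETURN_IF_ERROR(cudaStreamSynchronize(stream));
  // d_out now holds 0.0, 0.5, 1.0, ..., 499.5 on the device.
  CUDA_RETURN_IF_ERROR(cudaFree(d_out));
  return Status::OK();
}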
All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \n#include \n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"range_impl.h\"\n\nusing namespace onnxruntime::rocm;\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void RangeKernel(const T start, const T delta, const int count, T* output) {\n int index = blockIdx.x * blockDim.x + threadIdx.x;\n if (index < count) {\n output[index] = start + delta * index;\n }\n}\n\ntemplate \nStatus RangeImpl(hipStream_t stream, const T start, const T delta, const int count, T* output) {\n constexpr int block_size = 256;\n int grid_size = (count + block_size - 1) / block_size;\n RangeKernel<<>>(start, delta, count, output);\n return HIP_CALL(hipGetLastError());\n}\n\n#define SPECIALIZED_IMPL(T) \\\n template Status RangeImpl(hipStream_t stream, const T start, const T delta, const int count, T* output);\n\nSPECIALIZED_IMPL(int16_t)\nSPECIALIZED_IMPL(int32_t)\nSPECIALIZED_IMPL(int64_t)\nSPECIALIZED_IMPL(float)\nSPECIALIZED_IMPL(double)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\nvoid NGramRepeatBlockImpl(\n cudaStream_t stream,\n const int64_t* tokens_ptr,\n float* scores_ptr,\n int bsz,\n int step,\n int max_predict_len,\n int vocab_size,\n int beam_size,\n int no_repeat_ngram_size);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\nvoid NGramRepeatBlockImpl(\n hipStream_t stream,\n const int64_t* tokens_ptr,\n float* scores_ptr,\n int bsz,\n int step,\n int max_predict_len,\n int vocab_size,\n int beam_size,\n int no_repeat_ngram_size);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass RemovePadding final : public CudaKernel {\n public:\n RemovePadding(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass RemovePadding final : public RocmKernel {\n public:\n RemovePadding(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"contrib_ops/cuda/bert/restore_padding.h\"\n#include \"contrib_ops/cuda/bert/bert_padding.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n#define REGISTER_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n RestorePadding, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n RestorePadding);\n\nREGISTER_KERNEL_TYPED(float)\nREGISTER_KERNEL_TYPED(MLFloat16)\n\nusing namespace ONNX_NAMESPACE;\n\ntemplate \nRestorePadding::RestorePadding(const OpKernelInfo& op_kernel_info) : CudaKernel(op_kernel_info) {\n}\n\ntemplate \nStatus RestorePadding::ComputeInternal(OpKernelContext* context) const {\n // shape of inputs:\n // input: (total_tokens, hidden_size)\n // token_offset: (batch_size, sequence_length)\n // shape of outputs:\n // output: (batch_size, sequence_length, hidden_size)\n\n const Tensor* input = context->Input(0);\n const Tensor* token_offset = context->Input(1);\n\n const auto& dims = input->Shape().GetDims();\n if (dims.size() != 2) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Input 'input' is expected to have 2 dimensions, got \",\n dims.size());\n }\n int64_t total_tokens = dims[0];\n int64_t hidden_size = dims[1];\n\n const auto& token_offset_dims = token_offset->Shape().GetDims();\n if (token_offset_dims.size() != 2) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Input 'token_offset' is expected to have 2 dimensions, got \",\n token_offset_dims.size());\n }\n int64_t batch_size = token_offset_dims[0];\n int64_t sequence_length = token_offset_dims[1];\n\n TensorShapeVector output_shape(3);\n output_shape[0] = batch_size;\n output_shape[1] = sequence_length;\n output_shape[2] = hidden_size;\n Tensor* output = context->Output(0, output_shape);\n\n typedef typename ToCudaType::MappedType CudaT;\n LaunchRestorePadding(\n reinterpret_cast(output->MutableData()),\n reinterpret_cast(input->Data()),\n token_offset->Data(),\n static_cast(total_tokens),\n static_cast(hidden_size),\n static_cast(batch_size),\n static_cast(sequence_length),\n Stream(context));\n\n CUDA_RETURN_IF_ERROR(cudaGetLastError());\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
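A worked shape example for the RestorePadding compute above, with illustrative sizes:

// token_offset : (2, 4)       -> batch_size = 2, sequence_length = 4
// input        : (5, 768)     -> total_tokens = 5, hidden_size = 768
// output       : (2, 4, 768)  -> 5 of its 2 * 4 = 8 rows are filled from input,
//                                the remaining 3 correspond to padding positions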
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"contrib_ops/rocm/bert/restore_padding.h\"\n#include \"contrib_ops/rocm/bert/bert_padding.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n#define REGISTER_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n RestorePadding, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n RestorePadding);\n\nREGISTER_KERNEL_TYPED(float)\nREGISTER_KERNEL_TYPED(MLFloat16)\n\nusing namespace ONNX_NAMESPACE;\n\ntemplate \nRestorePadding::RestorePadding(const OpKernelInfo& op_kernel_info) : RocmKernel(op_kernel_info) {\n}\n\ntemplate \nStatus RestorePadding::ComputeInternal(OpKernelContext* context) const {\n // shape of inputs:\n // input: (total_tokens, hidden_size)\n // token_offset: (batch_size, sequence_length)\n // shape of outputs:\n // output: (batch_size, sequence_length, hidden_size)\n\n const Tensor* input = context->Input(0);\n const Tensor* token_offset = context->Input(1);\n\n const auto& dims = input->Shape().GetDims();\n if (dims.size() != 2) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Input 'input' is expected to have 2 dimensions, got \",\n dims.size());\n }\n int64_t total_tokens = dims[0];\n int64_t hidden_size = dims[1];\n\n const auto& token_offset_dims = token_offset->Shape().GetDims();\n if (token_offset_dims.size() != 2) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Input 'token_offset' is expected to have 2 dimensions, got \",\n token_offset_dims.size());\n }\n int64_t batch_size = token_offset_dims[0];\n int64_t sequence_length = token_offset_dims[1];\n\n TensorShapeVector output_shape(3);\n output_shape[0] = batch_size;\n output_shape[1] = sequence_length;\n output_shape[2] = hidden_size;\n Tensor* output = context->Output(0, output_shape);\n\n typedef typename ToHipType::MappedType HipT;\n LaunchRestorePadding(\n reinterpret_cast(output->MutableData()),\n reinterpret_cast(input->Data()),\n token_offset->Data(),\n static_cast(total_tokens),\n static_cast(hidden_size),\n static_cast(batch_size),\n static_cast(sequence_length),\n Stream(context));\n\n HIP_RETURN_IF_ERROR(hipGetLastError());\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass RestorePadding final : public CudaKernel {\n public:\n RestorePadding(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass RestorePadding final : public RocmKernel {\n public:\n RestorePadding(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// A wrapper class of cudaEvent_t to destroy the event automatically for avoiding memory leak.\nclass AutoDestoryCudaEvent {\n public:\n AutoDestoryCudaEvent() : cuda_event_(nullptr) {\n }\n\n ~AutoDestoryCudaEvent() {\n if (cuda_event_ != nullptr)\n (void)cudaEventDestroy(cuda_event_);\n }\n\n cudaEvent_t& Get() {\n return cuda_event_;\n }\n\n private:\n cudaEvent_t cuda_event_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// A wrapper class of hipEvent_t to destroy the event automatically for avoiding memory leak.\nclass AutoDestoryCudaEvent {\n public:\n AutoDestoryCudaEvent() : rocm_event_(nullptr) {\n }\n\n ~AutoDestoryCudaEvent() {\n if (rocm_event_ != nullptr)\n (void)hipEventDestroy(rocm_event_);\n }\n\n hipEvent_t& Get() {\n return rocm_event_;\n }\n\n private:\n hipEvent_t rocm_event_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#if defined(USE_MPI)\n#define OMPI_SKIP_MPICXX 1 // See https://github.com/open-mpi/ompi/issues/5157\n#include \n#undef OMPI_SKIP_MPICXX\n\nnamespace onnxruntime {\n\n#if defined(USE_MPI)\n#define MPI_CHECK(condition) \\\n do { \\\n int error = (condition); \\\n ORT_ENFORCE( \\\n error == MPI_SUCCESS, \\\n \"MPI Error at: \", \\\n __FILE__, \\\n \":\", \\\n __LINE__, \\\n \": \", \\\n error); \\\n } while (0)\n#endif\n} // namespace onnxruntime\n#endif\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#if defined(USE_MPI)\n#define OMPI_SKIP_MPICXX 1 // See https://github.com/open-mpi/ompi/issues/5157\n#include \n#undef OMPI_SKIP_MPICXX\n\nnamespace onnxruntime {\n\n#if defined(USE_MPI)\n#define MPI_CHECK(condition) \\\n do { \\\n int error = (condition); \\\n ORT_ENFORCE( \\\n error == MPI_SUCCESS, \\\n \"MPI Error at: \", \\\n __FILE__, \\\n \":\", \\\n __LINE__, \\\n \": \", \\\n error); \\\n } while (0)\n#endif\n} // namespace onnxruntime\n#endif\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
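A short usage sketch of the MPI_CHECK macro above; it only compiles when the build defines USE_MPI, and everything other than MPI_CHECK is the standard MPI C API:

#if defined(USE_MPI)
void InitRankAndSize(int& world_rank, int& world_size) {
  MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &world_rank));
  MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &world_size));
  // Any call that does not return MPI_SUCCESS trips ORT_ENFORCE with file, line, and error code.
}
#endif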
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\n#if defined(ORT_USE_NCCL)\n#include \n#endif\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// -----------------------------------------------------------------------\n// Defines a new version of nccl classes\n// that independent with training::DistributedRunContext, only rely on MPI\n// -----------------------------------------------------------------------\nclass NcclContext final {\n public:\n NcclContext();\n ~NcclContext();\n\n ncclComm_t Comm() {\n return comm_;\n }\n\n int Rank() const {\n return rank_;\n }\n\n int Size() const {\n return world_size_;\n }\n\n private:\n ncclComm_t comm_;\n int rank_;\n int world_size_;\n};\n\nclass NcclKernel : public ::onnxruntime::cuda::CudaKernel {\n public:\n explicit NcclKernel(const OpKernelInfo& info);\n\n protected:\n NcclContext* nccl_ = nullptr;\n};\n\n/*\n * Defines new version of Nccl classes that independent with training::DistributedContext\n * only rely on MPI\n */\nclass AllReduce final : public NcclKernel {\n public:\n explicit AllReduce(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass AllGather final : public NcclKernel {\n public:\n explicit AllGather(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t group_size_ = -1;\n int64_t axis_ = -1;\n const CUDAExecutionProvider* cuda_ep_;\n};\n\nclass AllToAll final : public NcclKernel {\n public:\n explicit AllToAll(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t group_size_ = -1;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
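The NcclContext/NcclKernel declarations above leave ComputeInternal to the corresponding .cc file; the sketch below is a hypothetical illustration of how such a kernel typically issues the collective, not the actual AllReduce implementation. ncclAllReduce, ncclFloat, ncclSum, and ncclGetErrorString are the standard NCCL/RCCL API:

// Hypothetical helper, assuming float data already resident on the device.
Status AllReduceSketch(NcclContext* nccl, const float* input, float* output,
                       size_t count, cudaStream_t stream) {
  ncclResult_t result = ncclAllReduce(input, output, count, ncclFloat, ncclSum,
                                      nccl->Comm(), stream);
  if (result != ncclSuccess) {
    return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "ncclAllReduce failed: ", ncclGetErrorString(result));
  }
  return Status::OK();  // the reduction completes asynchronously on `stream`
}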
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\n#if defined(ORT_USE_NCCL)\n#include \n#endif\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// -----------------------------------------------------------------------\n// Defines a new version of nccl classes\n// that independent with training::DistributedRunContext, only rely on MPI\n// -----------------------------------------------------------------------\nclass NcclContext final {\n public:\n NcclContext();\n ~NcclContext();\n\n ncclComm_t Comm() {\n return comm_;\n }\n\n int Rank() const {\n return rank_;\n }\n\n int Size() const {\n return world_size_;\n }\n\n private:\n ncclComm_t comm_;\n int rank_;\n int world_size_;\n};\n\nclass NcclKernel : public ::onnxruntime::rocm::RocmKernel {\n public:\n explicit NcclKernel(const OpKernelInfo& info);\n\n protected:\n NcclContext* nccl_ = nullptr;\n};\n\n/*\n * Defines new version of Nccl classes that independent with training::DistributedContext\n * only rely on MPI\n */\nclass AllReduce final : public NcclKernel {\n public:\n explicit AllReduce(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass AllGather final : public NcclKernel {\n public:\n explicit AllGather(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t group_size_ = -1;\n int64_t axis_ = -1;\n const ROCMExecutionProvider* rocm_ep_;\n};\n\nclass AllToAll final : public NcclKernel {\n public:\n explicit AllToAll(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t group_size_ = -1;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"contrib_ops/cuda/diffusion/bias_add.h\"\n#include \"contrib_ops/cuda/diffusion/bias_add_impl.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n#define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasAdd, kMSDomain, 1, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), BiasAdd);\nREGISTER_KERNEL_TYPED(MLFloat16);\nREGISTER_KERNEL_TYPED(float);\nusing namespace ONNX_NAMESPACE;\ntemplate \nBiasAdd::BiasAdd(const OpKernelInfo& op_info) : CudaKernel(op_info) {\n}\ntemplate \nStatus BiasAdd::ComputeInternal(OpKernelContext* context) const {\n \n \n \n \n const Tensor* input = context->Input(0);\n const auto& input_dims = input->Shape().GetDims();\n if (input_dims.size() != 3) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"The input is expected to have 3 dimensions, got \", input_dims.size());\n }\n if (input_dims[2] != 320 && input_dims[2] != 640 && input_dims[2] != 1280) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Number of channels should be 320, 640 or 1280, got \", input_dims[2]);\n }\n const Tensor* bias = context->Input(1);\n const auto& bias_dims = bias->Shape().GetDims();\n if (bias_dims.size() != 1) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"The bias is expected to have 1 dimensions, got \", bias_dims.size());\n }\n if (bias_dims[0] != input_dims[2]) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Number of channels in the last dimension of input and bias are not the same\");\n }\n const Tensor* skip = context->Input(2);\n if (skip->Shape() != input->Shape()) 
{\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Shape of input and skip (residual) shall be the same\");\n }\n Tensor* output = context->Output(0, input->Shape());\n typedef typename ToCudaType::MappedType CudaT;\n const int32_t grid_size = static_cast(input_dims[0] * input_dims[1]);\n LaunchBiasAddKernel(Stream(context), grid_size, static_cast(input_dims[2]), reinterpret_cast(input->Data()), reinterpret_cast(bias->Data()), reinterpret_cast(skip->Data()), reinterpret_cast(output->MutableData()));\n CUDA_RETURN_IF_ERROR(cudaPeekAtLastError());\n return Status::OK();\n}\n} \n} \n} \n\n###", "hip": " \n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"contrib_ops/rocm/diffusion/bias_add.h\"\n#include \"contrib_ops/rocm/diffusion/bias_add_impl.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n#define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasAdd, kMSDomain, 1, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), BiasAdd);\nREGISTER_KERNEL_TYPED(MLFloat16);\nREGISTER_KERNEL_TYPED(float);\nusing namespace ONNX_NAMESPACE;\ntemplate \nBiasAdd::BiasAdd(const OpKernelInfo& op_info) : RocmKernel(op_info) {\n}\ntemplate \nStatus BiasAdd::ComputeInternal(OpKernelContext* context) const {\n \n \n \n \n const Tensor* input = context->Input(0);\n const auto& input_dims = input->Shape().GetDims();\n if (input_dims.size() != 3) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"The input is expected to have 3 dimensions, got \", input_dims.size());\n }\n if (input_dims[2] != 320 && input_dims[2] != 640 && input_dims[2] != 1280) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Number of channels should be 320, 640 or 1280, got \", input_dims[2]);\n }\n const Tensor* bias = context->Input(1);\n const auto& bias_dims = bias->Shape().GetDims();\n if (bias_dims.size() != 1) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"The bias is expected to have 1 dimensions, got \", bias_dims.size());\n }\n if (bias_dims[0] != input_dims[2]) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Number of channels in the last dimension of input and bias are not the same\");\n }\n const Tensor* skip = context->Input(2);\n if (skip->Shape() != input->Shape()) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"Shape of input and skip (residual) shall be the same\");\n }\n Tensor* output = context->Output(0, input->Shape());\n typedef typename ToHipType::MappedType HipT;\n const int32_t grid_size = static_cast(input_dims[0] * input_dims[1]);\n LaunchBiasAddKernel(Stream(context), grid_size, static_cast(input_dims[2]), reinterpret_cast(input->Data()), reinterpret_cast(bias->Data()), reinterpret_cast(skip->Data()), reinterpret_cast(output->MutableData()));\n HIP_RETURN_IF_ERROR(hipPeekAtLastError());\n return Status::OK();\n}\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass BiasAdd final : public CudaKernel {\n public:\n BiasAdd(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass BiasAdd final : public RocmKernel {\n public:\n BiasAdd(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n\n\n#include \n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"contrib_ops/cuda/diffusion/bias_add_impl.h\"\nusing namespace onnxruntime::cuda;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\ntemplate \n__global__ void BiasAddKernel(T const* input, T const* bias, T const* residual, T* output) {\n int32_t base_offset = blockIdx.x * C + threadIdx.x;\n int32_t bias_offset = threadIdx.x;\n#pragma unroll\n for (int32_t i = 0; i < C / TPB; ++i) {\n output[base_offset] = input[base_offset] + bias[bias_offset] + residual[base_offset];\n base_offset += TPB;\n bias_offset += TPB;\n }\n}\ntemplate __global__ void BiasAddKernel(float const*, float const*, float const*, float*);\ntemplate __global__ void BiasAddKernel(float const*, float const*, float const*, float*);\ntemplate __global__ void BiasAddKernel(float const*, float const*, float const*, float*);\ntemplate __global__ void BiasAddKernel(half const*, half const*, half const*, half*);\ntemplate __global__ void BiasAddKernel(half const*, half const*, half const*, half*);\ntemplate __global__ void BiasAddKernel(half const*, half const*, half const*, half*);\ntemplate \nvoid LaunchBiasAddKernel(cudaStream_t stream, int32_t grid_size, int32_t num_channels, T const* input, T const* bias, T const* residual, T* output) {\n constexpr int32_t TPB = 320; \n switch (num_channels) {\n case 320:\n (BiasAddKernel)<<>>(input, bias, residual, output);\n break;\n case 640:\n (BiasAddKernel)<<>>(input, bias, residual, output);\n break;\n case 1280:\n (BiasAddKernel)<<>>(input, bias, residual, output);\n break;\n default:\n ORT_NOT_IMPLEMENTED(\"Not implemented\");\n }\n}\ntemplate void LaunchBiasAddKernel(cudaStream_t stream, int32_t grid_size, int32_t num_channels, float const* input, float const* bias, float const* residual, float* output);\ntemplate void LaunchBiasAddKernel(cudaStream_t stream, int32_t grid_size, int32_t num_channels, half const* input, half const* bias, half const* residual, half* output);\n} \n} \n} \n\n###", "hip": " #include \"hip/hip_runtime.h\"\n\n\n\n\n#include \n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"contrib_ops/rocm/diffusion/bias_add_impl.h\"\nusing namespace onnxruntime::rocm;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\ntemplate \n__global__ void BiasAddKernel(T const* input, T const* bias, T const* residual, T* output) {\n int32_t base_offset = blockIdx.x * C + threadIdx.x;\n int32_t bias_offset = threadIdx.x;\n#pragma unroll\n for (int32_t i = 0; i < C / TPB; ++i) {\n output[base_offset] = input[base_offset] + bias[bias_offset] + residual[base_offset];\n base_offset += TPB;\n bias_offset += TPB;\n }\n}\ntemplate __global__ void BiasAddKernel(float const*, float const*, float const*, float*);\ntemplate __global__ void BiasAddKernel(float const*, float const*, float const*, float*);\ntemplate __global__ void BiasAddKernel(float const*, float const*, float const*, float*);\ntemplate __global__ void BiasAddKernel(half 
const*, half const*, half const*, half*);\ntemplate __global__ void BiasAddKernel(half const*, half const*, half const*, half*);\ntemplate __global__ void BiasAddKernel(half const*, half const*, half const*, half*);\ntemplate \nvoid LaunchBiasAddKernel(hipStream_t stream, int32_t grid_size, int32_t num_channels, T const* input, T const* bias, T const* residual, T* output) {\n constexpr int32_t TPB = 320; \n switch (num_channels) {\n case 320:\n (BiasAddKernel)<<>>(input, bias, residual, output);\n break;\n case 640:\n (BiasAddKernel)<<>>(input, bias, residual, output);\n break;\n case 1280:\n (BiasAddKernel)<<>>(input, bias, residual, output);\n break;\n default:\n ORT_NOT_IMPLEMENTED(\"Not implemented\");\n }\n}\ntemplate void LaunchBiasAddKernel(hipStream_t stream, int32_t grid_size, int32_t num_channels, float const* input, float const* bias, float const* residual, float* output);\ntemplate void LaunchBiasAddKernel(hipStream_t stream, int32_t grid_size, int32_t num_channels, half const* input, half const* bias, half const* residual, half* output);\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus RangeImpl(cudaStream_t stream, const T start, const T delta, const int count, T* output);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus RangeImpl(hipStream_t stream, const T start, const T delta, const int count, T* output);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/common/status.h\"\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nvoid LaunchBiasAddKernel(cudaStream_t stream, int32_t grid_size, int32_t num_channels,\n T const* input, T const* bias, T const* residual, T* output);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/common/status.h\"\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nvoid LaunchBiasAddKernel(hipStream_t stream, int32_t grid_size, int32_t num_channels,\n T const* input, T const* bias, T const* residual, T* output);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"contrib_ops/cuda/diffusion/bias_split_gelu.h\"\n#include \"contrib_ops/cuda/diffusion/bias_split_gelu_impl.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n#define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasSplitGelu, kMSDomain, 1, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), BiasSplitGelu);\nREGISTER_KERNEL_TYPED(MLFloat16);\nREGISTER_KERNEL_TYPED(float);\nusing namespace ONNX_NAMESPACE;\ntemplate \nBiasSplitGelu::BiasSplitGelu(const OpKernelInfo& op_info) : CudaKernel(op_info) {\n}\ntemplate \nStatus BiasSplitGelu::ComputeInternal(OpKernelContext* context) const {\n const Tensor* input = context->Input(0);\n const auto& input_dims = input->Shape().GetDims();\n if (input_dims.size() != 3) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"input is expected to have 3 dimensions, got \", input_dims.size());\n }\n if (input_dims[2] != 2560 && input_dims[2] != 5120 && input_dims[2] != 10240) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"hidden size should be 2560, 5120 or 10240, got \", input_dims[2]);\n }\n const Tensor* bias = context->Input(1);\n const auto& bias_dims = bias->Shape().GetDims();\n if (bias_dims.size() != 1) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"bias is expected to have 1 dimensions, got \", bias_dims.size());\n }\n if (bias_dims[0] != input_dims[2]) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"last dimension of input and bias are not the same\");\n }\n TensorShapeVector output_shape = input->Shape().AsShapeVector();\n output_shape[2] = input_dims[2] / 2;\n Tensor* output = context->Output(0, output_shape);\n typedef typename ToCudaType::MappedType CudaT;\n const int32_t grid_size = static_cast(input_dims[0] * input_dims[1]);\n const int32_t half_hidden_size = static_cast(input_dims[2] / 2);\n LaunchBiasSplitGeluKernel(Stream(context), grid_size, half_hidden_size, reinterpret_cast(input->Data()), reinterpret_cast(bias->Data()), reinterpret_cast(output->MutableData()));\n CUDA_RETURN_IF_ERROR(cudaPeekAtLastError());\n return Status::OK();\n}\n} \n} \n} \n\n###", "hip": " \n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"contrib_ops/rocm/diffusion/bias_split_gelu.h\"\n#include \"contrib_ops/rocm/diffusion/bias_split_gelu_impl.h\"\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n#define REGISTER_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( BiasSplitGelu, kMSDomain, 1, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), BiasSplitGelu);\nREGISTER_KERNEL_TYPED(MLFloat16);\nREGISTER_KERNEL_TYPED(float);\nusing namespace ONNX_NAMESPACE;\ntemplate \nBiasSplitGelu::BiasSplitGelu(const OpKernelInfo& op_info) : RocmKernel(op_info) {\n}\ntemplate \nStatus BiasSplitGelu::ComputeInternal(OpKernelContext* context) const {\n const Tensor* input = context->Input(0);\n const auto& input_dims = 
input->Shape().GetDims();\n if (input_dims.size() != 3) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"input is expected to have 3 dimensions, got \", input_dims.size());\n }\n if (input_dims[2] != 2560 && input_dims[2] != 5120 && input_dims[2] != 10240) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"hidden size should be 2560, 5120 or 10240, got \", input_dims[2]);\n }\n const Tensor* bias = context->Input(1);\n const auto& bias_dims = bias->Shape().GetDims();\n if (bias_dims.size() != 1) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"bias is expected to have 1 dimensions, got \", bias_dims.size());\n }\n if (bias_dims[0] != input_dims[2]) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"last dimension of input and bias are not the same\");\n }\n TensorShapeVector output_shape = input->Shape().AsShapeVector();\n output_shape[2] = input_dims[2] / 2;\n Tensor* output = context->Output(0, output_shape);\n typedef typename ToHipType::MappedType HipT;\n const int32_t grid_size = static_cast(input_dims[0] * input_dims[1]);\n const int32_t half_hidden_size = static_cast(input_dims[2] / 2);\n LaunchBiasSplitGeluKernel(Stream(context), grid_size, half_hidden_size, reinterpret_cast(input->Data()), reinterpret_cast(bias->Data()), reinterpret_cast(output->MutableData()));\n HIP_RETURN_IF_ERROR(hipPeekAtLastError());\n return Status::OK();\n}\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass BiasSplitGelu final : public CudaKernel {\n public:\n BiasSplitGelu(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass BiasSplitGelu final : public RocmKernel {\n public:\n BiasSplitGelu(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n\n\n#include \n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"contrib_ops/cuda/diffusion/bias_split_gelu_impl.h\"\nusing namespace onnxruntime::cuda;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\ntemplate \n__global__ void biasSplitGeluKernel(T const* input, T const* bias, T* output) {\n int32_t index_input = blockIdx.x * HHS * 2 + threadIdx.x;\n int32_t index_output = blockIdx.x * HHS + threadIdx.x;\n int32_t index_bias = threadIdx.x;\n#pragma unroll\n for (int32_t i = 0; i < HHS / TPB; ++i) {\n auto value_left = (float)(input[index_input] + bias[index_bias]);\n auto value_right = (float)(input[index_input + HHS] + bias[index_bias + HHS]);\n \n float gelu_right = value_right * 0.5f * (erff(value_right / 1.41421356237f) + 1.0f);\n float result = value_left * gelu_right;\n output[index_output] = static_cast(result);\n index_input += TPB;\n index_output += TPB;\n index_bias += TPB;\n }\n return;\n}\ntemplate \nvoid LaunchBiasSplitGeluKernel(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size, T const* input, T const* bias, T* output) {\n constexpr int32_t TPB = 256; \n switch (half_hidden_size) {\n case 1280:\n (biasSplitGeluKernel)<<>>(input, bias, output);\n break;\n case 2560:\n (biasSplitGeluKernel)<<>>(input, bias, output);\n break;\n case 5120:\n (biasSplitGeluKernel)<<>>(input, bias, output);\n break;\n default:\n ORT_NOT_IMPLEMENTED(\"Not implemented\");\n }\n}\ntemplate __global__ void biasSplitGeluKernel(float const*, float const*, float*);\ntemplate __global__ void biasSplitGeluKernel(float const*, float const*, float*);\ntemplate __global__ void biasSplitGeluKernel(float const*, float const*, float*);\ntemplate __global__ void biasSplitGeluKernel(half const*, half const*, half*);\ntemplate __global__ void biasSplitGeluKernel(half const*, half const*, half*);\ntemplate __global__ void biasSplitGeluKernel(half const*, half const*, half*);\ntemplate void LaunchBiasSplitGeluKernel(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size, float const* input, float const* bias, float* output);\ntemplate void LaunchBiasSplitGeluKernel(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size, half const* input, half const* bias, half* output);\n} \n} \n} \n\n###", "hip": " #include \"hip/hip_runtime.h\"\n\n\n\n\n#include \n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"contrib_ops/rocm/diffusion/bias_split_gelu_impl.h\"\nusing namespace onnxruntime::rocm;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\ntemplate \n__global__ void biasSplitGeluKernel(T const* input, T const* bias, T* output) {\n int32_t index_input = blockIdx.x * HHS * 2 + threadIdx.x;\n int32_t index_output = blockIdx.x * HHS + threadIdx.x;\n int32_t index_bias = threadIdx.x;\n#pragma unroll\n for (int32_t i = 0; i < HHS / TPB; ++i) {\n auto value_left = (float)(input[index_input] + bias[index_bias]);\n auto value_right = (float)(input[index_input + HHS] + bias[index_bias + 
HHS]);\n \n float gelu_right = value_right * 0.5f * (erff(value_right / 1.41421356237f) + 1.0f);\n float result = value_left * gelu_right;\n output[index_output] = static_cast(result);\n index_input += TPB;\n index_output += TPB;\n index_bias += TPB;\n }\n return;\n}\ntemplate \nvoid LaunchBiasSplitGeluKernel(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size, T const* input, T const* bias, T* output) {\n constexpr int32_t TPB = 256; \n switch (half_hidden_size) {\n case 1280:\n (biasSplitGeluKernel)<<>>(input, bias, output);\n break;\n case 2560:\n (biasSplitGeluKernel)<<>>(input, bias, output);\n break;\n case 5120:\n (biasSplitGeluKernel)<<>>(input, bias, output);\n break;\n default:\n ORT_NOT_IMPLEMENTED(\"Not implemented\");\n }\n}\ntemplate __global__ void biasSplitGeluKernel(float const*, float const*, float*);\ntemplate __global__ void biasSplitGeluKernel(float const*, float const*, float*);\ntemplate __global__ void biasSplitGeluKernel(float const*, float const*, float*);\ntemplate __global__ void biasSplitGeluKernel(half const*, half const*, half*);\ntemplate __global__ void biasSplitGeluKernel(half const*, half const*, half*);\ntemplate __global__ void biasSplitGeluKernel(half const*, half const*, half*);\ntemplate void LaunchBiasSplitGeluKernel(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size, float const* input, float const* bias, float* output);\ntemplate void LaunchBiasSplitGeluKernel(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size, half const* input, half const* bias, half* output);\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/common/status.h\"\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nvoid LaunchBiasSplitGeluKernel(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size,\n T const* input, T const* bias, T* output);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/common/status.h\"\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nvoid LaunchBiasSplitGeluKernel(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size,\n T const* input, T const* bias, T* output);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\nclass GroupNorm final : public CudaKernel {\n public:\n GroupNorm(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool use_swish_activation_;\n float epsilon_;\n int num_groups_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\nclass GroupNorm final : public RocmKernel {\n public:\n GroupNorm(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool use_swish_activation_;\n float epsilon_;\n int num_groups_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\ntemplate \nclass GridSample final : public CudaKernel {\n public:\n explicit GridSample(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t mode_i_; // 0: bilinear (default), 1: nearest 2: bicubic\n int64_t padding_mode_i_; // 0:'zeros', 1: 'border', 2:'reflection'\n int64_t align_corners_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\ntemplate \nclass GridSample final : public RocmKernel {\n public:\n explicit GridSample(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t mode_i_; // 0: bilinear (default), 1: nearest 2: bicubic\n int64_t padding_mode_i_; // 0:'zeros', 1: 'border', 2:'reflection'\n int64_t align_corners_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nvoid GridSampleImpl(\n cudaStream_t stream,\n const T* input_data,\n const T* grid_data,\n const int64_t mode,\n const int64_t padding_mode,\n const int64_t align_corners,\n const int64_t dims_input[4],\n const int64_t H_out,\n const int64_t W_out,\n T* output_data);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nvoid GridSampleImpl(\n hipStream_t stream,\n const T* input_data,\n const T* grid_data,\n const int64_t mode,\n const int64_t padding_mode,\n const int64_t align_corners,\n const int64_t dims_input[4],\n const int64_t H_out,\n const int64_t W_out,\n T* output_data);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/nn/layer_norm.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// LayerNormalization is an official ONNX operator in opset 17.\n#define REGISTER_KERNEL_TYPED(T, U, V) \\\n ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX(LayerNormalization, kOnnxDomain, 1, 16, T##_##U##_##V, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"U\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"V\", DataTypeImpl::GetTensorType()), \\\n onnxruntime::cuda::LayerNorm); \\\n ONNX_OPERATOR_TYPED_KERNEL_EX(SimplifiedLayerNormalization, kOnnxDomain, 1, T##_##U##_##V, kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"U\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"V\", DataTypeImpl::GetTensorType()), \\\n onnxruntime::cuda::LayerNorm);\n\nREGISTER_KERNEL_TYPED(float, float, float)\nREGISTER_KERNEL_TYPED(double, double, double)\nREGISTER_KERNEL_TYPED(MLFloat16, float, MLFloat16)\nREGISTER_KERNEL_TYPED(float, float, MLFloat16)\nREGISTER_KERNEL_TYPED(MLFloat16, float, float)\nREGISTER_KERNEL_TYPED(BFloat16, float, BFloat16)\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/nn/layer_norm.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// LayerNormalization is an official ONNX operator in opset 17.\n#define REGISTER_KERNEL_TYPED(T, U, V) \\\n ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX(LayerNormalization, kOnnxDomain, 1, 16, T##_##U##_##V, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"U\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"V\", DataTypeImpl::GetTensorType()), \\\n onnxruntime::rocm::LayerNorm); \\\n ONNX_OPERATOR_TYPED_KERNEL_EX(SimplifiedLayerNormalization, kOnnxDomain, 1, T##_##U##_##V, kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"U\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"V\", DataTypeImpl::GetTensorType()), \\\n onnxruntime::rocm::LayerNorm);\n\nREGISTER_KERNEL_TYPED(float, float, float)\nREGISTER_KERNEL_TYPED(double, double, double)\nREGISTER_KERNEL_TYPED(MLFloat16, float, MLFloat16)\nREGISTER_KERNEL_TYPED(float, float, MLFloat16)\nREGISTER_KERNEL_TYPED(MLFloat16, float, float)\nREGISTER_KERNEL_TYPED(BFloat16, float, BFloat16)\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/framework/random_generator.h\"\n\nusing namespace onnxruntime::cuda;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nvoid BiasDropoutKernelImpl(const cudaDeviceProp& prop, cudaStream_t stream, const int64_t N,\n const int64_t mask_element_count, const fast_divmod fdm_dim, const float ratio,\n PhiloxGenerator& generator, const T* X_data, const T* bias_data, const T* residual_data,\n T* Y_data, void* mask_data, bool has_same_shape_bias, bool use_bitmask);\n\ntemplate \nclass BiasDropout final : public CudaKernel {\n public:\n BiasDropout(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t seed = 0;\n if (info.GetAttr(\"seed\", &seed).IsOK()) {\n generator_ = std::make_unique(static_cast(seed));\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n mutable std::unique_ptr generator_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/framework/random_generator.h\"\n\nusing namespace onnxruntime::rocm;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nvoid BiasDropoutKernelImpl(const hipDeviceProp_t& prop, hipStream_t stream, const int64_t N,\n const int64_t mask_element_count, const fast_divmod fdm_dim, const float ratio,\n PhiloxGenerator& generator, const T* X_data, const T* bias_data, const T* residual_data,\n T* Y_data, void* mask_data, bool has_same_shape_bias, bool use_bitmask);\n\ntemplate \nclass BiasDropout final : public RocmKernel {\n public:\n BiasDropout(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t seed = 0;\n if (info.GetAttr(\"seed\", &seed).IsOK()) {\n generator_ = std::make_unique(static_cast(seed));\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n mutable std::unique_ptr generator_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/math/binary_elementwise_ops_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n// define the device functors that perform the computation on scalars\n\n#define OP_FUNCTOR_DEFINITION(name, expr) \\\n template \\\n struct OP_##name { \\\n __device__ __inline__ T operator()(T1 a, T2 b) const { \\\n return (expr); \\\n } \\\n };\n\n#define BINARY_OP_NAME_EXPR(name, expr) \\\n OP_FUNCTOR_DEFINITION(name, expr)\n\nBINARY_OPS()\n\nOP_FUNCTOR_DEFINITION(Pow, _Pow(a, b))\n\n#undef BINARY_OP_NAME_EXPR\n\n#define BINARY_OP_NAME_EXPR2(name, expr) \\\n OP_FUNCTOR_DEFINITION(name, expr)\n\nBINARY_OPS2()\n\n#undef BINARY_OP_NAME_EXPR2\n\n#undef OP_FUNCTOR_DEFINITION\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/math/binary_elementwise_ops_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n// define the device functors that perform the computation on scalars\n\n#define OP_FUNCTOR_DEFINITION(name, expr) \\\n template \\\n struct OP_##name { \\\n __device__ __inline__ T operator()(T1 a, T2 b) const { \\\n return (expr); \\\n } \\\n };\n\n#define BINARY_OP_NAME_EXPR(name, expr) \\\n OP_FUNCTOR_DEFINITION(name, expr)\n\nBINARY_OPS()\n\nOP_FUNCTOR_DEFINITION(Pow, _Pow(a, b))\n\n#undef BINARY_OP_NAME_EXPR\n\n#define BINARY_OP_NAME_EXPR2(name, expr) \\\n OP_FUNCTOR_DEFINITION(name, expr)\n\nBINARY_OPS2()\n\n#undef BINARY_OP_NAME_EXPR2\n\n#undef OP_FUNCTOR_DEFINITION\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"contrib_ops/cuda/math/bias_softmax.h\"\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"contrib_ops/cuda/math/bias_softmax_impl.h\"\n\nusing namespace onnxruntime;\nusing namespace onnxruntime::cuda;\nusing namespace onnxruntime::contrib::cuda;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nnamespace {\n\ntemplate \nstruct DispatchBiasSoftmaxImpl {\n Status operator()(cudaStream_t stream, cudnnHandle_t cudnn_handle, Tensor* Y, const Tensor* X, const Tensor* B,\n int element_count, int batch_count, bool is_inner_broadcast, int bias_broadcast_size) {\n typedef typename ToCudaType::MappedType CudaT;\n CudaT* output_data = reinterpret_cast(Y->template MutableData());\n const CudaT* input_data = reinterpret_cast(X->template Data());\n const CudaT* bias_data = reinterpret_cast(B->template Data());\n return BiasSoftmaxImpl(stream, cudnn_handle, output_data, input_data, bias_data, element_count, batch_count,\n is_inner_broadcast, bias_broadcast_size);\n }\n};\n\n} // namespace\n\n// MIOpen doesn't support double so ROCm kernel doesn't have double support for now.\n#ifdef USE_ROCM\n#define BIAS_SOFTMAX_TYPES float, MLFloat16\n#else\n#define BIAS_SOFTMAX_TYPES float, MLFloat16, double\n#endif\n\nONNX_OPERATOR_KERNEL_EX(\n BiasSoftmax, kMSDomain, 1, kCudaExecutionProvider,\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", BuildKernelDefConstraints()), BiasSoftmax);\n\nStatus BiasSoftmax::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* X = ctx->Input(0);\n const Tensor* B = ctx->Input(1);\n const TensorShape& X_shape = X->Shape();\n const TensorShape& B_shape = B->Shape();\n Tensor* Y = ctx->Output(0, X_shape);\n\n const int axis = static_cast(HandleNegativeAxis(axis_, X_shape.NumDimensions()));\n const int batch_count = static_cast(X_shape.SizeToDimension(axis));\n const int element_count = static_cast(X_shape.SizeFromDimension(axis));\n int bias_broadcast_size = static_cast(B_shape.Size() / element_count);\n if (is_inner_broadcast_) bias_broadcast_size = batch_count / bias_broadcast_size;\n utils::MLTypeCallDispatcher t_disp(X->GetElementType());\n return t_disp.InvokeRet(Stream(ctx), GetCudnnHandle(ctx), Y, X, B, element_count, batch_count,\n is_inner_broadcast_, bias_broadcast_size);\n}\n\n#undef BIAS_SOFTMAX_TYPES\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"contrib_ops/rocm/math/bias_softmax.h\"\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"contrib_ops/rocm/math/bias_softmax_impl.h\"\n\nusing namespace onnxruntime;\nusing namespace onnxruntime::rocm;\nusing namespace onnxruntime::contrib::rocm;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nnamespace {\n\ntemplate \nstruct DispatchBiasSoftmaxImpl {\n Status operator()(hipStream_t stream, miopenHandle_t miopen_handle, Tensor* Y, const Tensor* X, const Tensor* B,\n int element_count, int batch_count, bool is_inner_broadcast, int bias_broadcast_size) {\n typedef typename ToHipType::MappedType HipT;\n HipT* output_data = reinterpret_cast(Y->template MutableData());\n const HipT* input_data = reinterpret_cast(X->template Data());\n const HipT* bias_data = reinterpret_cast(B->template Data());\n return BiasSoftmaxImpl(stream, miopen_handle, output_data, input_data, bias_data, element_count, batch_count,\n is_inner_broadcast, bias_broadcast_size);\n }\n};\n\n} // namespace\n\n// MIOpen doesn't support double so ROCm kernel doesn't have double support for now.\n#ifdef USE_ROCM\n#define BIAS_SOFTMAX_TYPES float, MLFloat16\n#else\n#define BIAS_SOFTMAX_TYPES float, MLFloat16, double\n#endif\n\nONNX_OPERATOR_KERNEL_EX(\n BiasSoftmax, kMSDomain, 1, kRocmExecutionProvider,\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", BuildKernelDefConstraints()), BiasSoftmax);\n\nStatus BiasSoftmax::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* X = ctx->Input(0);\n const Tensor* B = ctx->Input(1);\n const TensorShape& X_shape = X->Shape();\n const TensorShape& B_shape = B->Shape();\n Tensor* Y = ctx->Output(0, X_shape);\n\n const int axis = static_cast(HandleNegativeAxis(axis_, X_shape.NumDimensions()));\n const int batch_count = static_cast(X_shape.SizeToDimension(axis));\n const int element_count = static_cast(X_shape.SizeFromDimension(axis));\n int bias_broadcast_size = static_cast(B_shape.Size() / element_count);\n if (is_inner_broadcast_) bias_broadcast_size = batch_count / bias_broadcast_size;\n utils::MLTypeCallDispatcher t_disp(X->GetElementType());\n return t_disp.InvokeRet(Stream(ctx), GetMiopenHandle(ctx), Y, X, B, element_count, batch_count,\n is_inner_broadcast_, bias_broadcast_size);\n}\n\n#undef BIAS_SOFTMAX_TYPES\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// BiasSoftmax follows the OpSet-11 definision of Softmax Op, that is, the input will be coerced to a 2D tensor\n// using axis attribute, all dims after axis (included) are in the same batch. This is different from definition\n// since OpSet-13. 
To use BiasSoftmax, during the fusion, if Softmax is OpSet-13 or newer, you can only fuse it\n// when axis attribute is the last dim, othewise, the computation result may be wrong.\nclass BiasSoftmax final : public onnxruntime::cuda::CudaKernel {\n public:\n BiasSoftmax(const OpKernelInfo& info) : CudaKernel{info} {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(1));\n int64_t is_inner_broadcast_value;\n ORT_ENFORCE(info.GetAttr(\"is_inner_broadcast\", &is_inner_broadcast_value).IsOK());\n is_inner_broadcast_ = is_inner_broadcast_value != 0;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n bool is_inner_broadcast_;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// BiasSoftmax follows the OpSet-11 definision of Softmax Op, that is, the input will be coerced to a 2D tensor\n// using axis attribute, all dims after axis (included) are in the same batch. This is different from definition\n// since OpSet-13. To use BiasSoftmax, during the fusion, if Softmax is OpSet-13 or newer, you can only fuse it\n// when axis attribute is the last dim, othewise, the computation result may be wrong.\nclass BiasSoftmax final : public onnxruntime::rocm::RocmKernel {\n public:\n BiasSoftmax(const OpKernelInfo& info) : RocmKernel{info} {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(1));\n int64_t is_inner_broadcast_value;\n ORT_ENFORCE(info.GetAttr(\"is_inner_broadcast\", &is_inner_broadcast_value).IsOK());\n is_inner_broadcast_ = is_inner_broadcast_value != 0;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n bool is_inner_broadcast_;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nStatus BiasSoftmaxImpl(cudaStream_t stream, cudnnHandle_t cudnn_handle, T* output_data, const T* input_data,\n const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,\n int bias_broadcast_size);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nStatus BiasSoftmaxImpl(hipStream_t stream, miopenHandle_t miopen_handle, T* output_data, const T* input_data,\n const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,\n int bias_broadcast_size);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"contrib_ops/cuda/math/binary_elementwise_ops.h\"\n#include \"contrib_ops/cuda/math/binary_elementwise_ops_impl.h\"\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n#define CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(x, ver, T) ONNX_OPERATOR_TYPED_KERNEL_EX( x, kMSDomain, ver, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), x);\n#define CONTRIB_BINARY_ELEMENTWISE_COMPUTE(x, T) template <> Status x::ComputeInternal(OpKernelContext* context) const { BinaryElementwisePreparation prepare; ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); Impl_##x::MappedType>( Stream(context), prepare.output_rank_or_simple_broadcast, &prepare.lhs_padded_strides, reinterpret_cast::MappedType*>(prepare.lhs_tensor->Data()), &prepare.rhs_padded_strides, reinterpret_cast::MappedType*>(prepare.rhs_tensor->Data()), &prepare.fdm_output_strides, prepare.fdm_H, prepare.fdm_C, reinterpret_cast::MappedType*>(prepare.output_tensor->MutableData()), prepare.output_tensor->Shape().Size()); return Status::OK(); }\n#define CONTRIB_BINARY_OP_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_COMPUTE(name, T)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#define CONTRIB_BINARY_OP_HFD(name, ver) CONTRIB_BINARY_OP_TYPED(name, ver, MLFloat16) CONTRIB_BINARY_OP_TYPED(name, ver, float) CONTRIB_BINARY_OP_TYPED(name, ver, double) CONTRIB_BINARY_OP_TYPED(name, ver, BFloat16)\nCONTRIB_BINARY_OP_HFD(BiasGelu, 1)\n} \n} \n} \n\n###", "hip": " \n\n#include \"contrib_ops/rocm/math/binary_elementwise_ops.h\"\n#include \"contrib_ops/rocm/math/binary_elementwise_ops_impl.h\"\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n#define CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(x, ver, T) ONNX_OPERATOR_TYPED_KERNEL_EX( x, kMSDomain, ver, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), x);\n#define CONTRIB_BINARY_ELEMENTWISE_COMPUTE(x, T) template <> Status x::ComputeInternal(OpKernelContext* context) const { BinaryElementwisePreparation prepare; ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); Impl_##x::MappedType>( Stream(context), prepare.output_rank_or_simple_broadcast, &prepare.lhs_padded_strides, reinterpret_cast::MappedType*>(prepare.lhs_tensor->Data()), &prepare.rhs_padded_strides, reinterpret_cast::MappedType*>(prepare.rhs_tensor->Data()), &prepare.fdm_output_strides, prepare.fdm_H, prepare.fdm_C, reinterpret_cast::MappedType*>(prepare.output_tensor->MutableData()), prepare.output_tensor->Shape().Size()); return Status::OK(); }\n#define CONTRIB_BINARY_OP_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_REGISTER_KERNEL_TYPED(name, ver, T) CONTRIB_BINARY_ELEMENTWISE_COMPUTE(name, T)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#define CONTRIB_BINARY_OP_HFD(name, ver) CONTRIB_BINARY_OP_TYPED(name, 
ver, MLFloat16) CONTRIB_BINARY_OP_TYPED(name, ver, float) CONTRIB_BINARY_OP_TYPED(name, ver, double) CONTRIB_BINARY_OP_TYPED(name, ver, BFloat16)\nCONTRIB_BINARY_OP_HFD(BiasGelu, 1)\n} \n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/math/binary_elementwise_ops.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/shared_inc/fast_divmod.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n\nusing namespace onnxruntime::cuda;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n// AddGelu fuse Add + Gelu\ntemplate \nclass BiasGelu final : public BinaryElementwise {\n public:\n BiasGelu(const OpKernelInfo& info) : BinaryElementwise(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/math/binary_elementwise_ops.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/shared_inc/fast_divmod.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n\nusing namespace onnxruntime::rocm;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n// AddGelu fuse Add + Gelu\ntemplate \nclass BiasGelu final : public BinaryElementwise {\n public:\n BiasGelu(const OpKernelInfo& info) : BinaryElementwise(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nusing namespace onnxruntime::cuda;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n// These macros simplifies coding. To add a new op with following steps:\n// 1. Add a new entry in CONTRIB_BINARY_OPS() list\n// 2. (optional) Define templated single element operator in binary_elementwise_ops_impl.cu\n// 3. (optional) Implement specialized single element operator\n// 4. Add op kernel class definition in binary_elementwise_ops.h\n// 5. 
Add op kernel registration and compute specialization in binary_elementwise_ops.cc\n#define CONTRIB_BINARY_OPS() \\\n CONTRIB_BINARY_OP_NAME_EXPR(BiasGelu, _Gelu(a + b))\n\n// NOTE that cu files are compiled with nvcc and should not refer to any onnxruntime headers\n// so struct BinaryElementwisePreparation cannot be used here\n#define CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \\\n template \\\n void Impl_##name( \\\n cudaStream_t stream, \\\n int32_t output_rank_or_simple_broadcast, \\\n const TArray* lhs_padded_strides, \\\n const T* lhs_data, \\\n const TArray* rhs_padded_strides, \\\n const T* rhs_data, \\\n const TArray* fdm_output_strides, \\\n const onnxruntime::cuda::fast_divmod& fdm_H, \\\n const onnxruntime::cuda::fast_divmod& fdm_C, \\\n T* output_data, \\\n size_t count)\n#define CONTRIB_BINARY_OP_NAME_EXPR(name, expr) CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name);\nCONTRIB_BINARY_OPS()\n#undef CONTRIB_BINARY_OP_NAME_EXPR\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " #include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nusing namespace onnxruntime::rocm;\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n// These macros simplifies coding. To add a new op with following steps:\n// 1. Add a new entry in CONTRIB_BINARY_OPS() list\n// 2. (optional) Define templated single element operator in binary_elementwise_ops_impl.cu\n// 3. (optional) Implement specialized single element operator\n// 4. Add op kernel class definition in binary_elementwise_ops.h\n// 5. Add op kernel registration and compute specialization in binary_elementwise_ops.cc\n#define CONTRIB_BINARY_OPS() \\\n CONTRIB_BINARY_OP_NAME_EXPR(BiasGelu, _Gelu(a + b))\n\n// NOTE that cu files are compiled with nvcc and should not refer to any onnxruntime headers\n// so struct BinaryElementwisePreparation cannot be used here\n#define CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \\\n template \\\n void Impl_##name( \\\n hipStream_t stream, \\\n int32_t output_rank_or_simple_broadcast, \\\n const TArray* lhs_padded_strides, \\\n const T* lhs_data, \\\n const TArray* rhs_padded_strides, \\\n const T* rhs_data, \\\n const TArray* fdm_output_strides, \\\n const onnxruntime::rocm::fast_divmod& fdm_H, \\\n const onnxruntime::rocm::fast_divmod& fdm_C, \\\n T* output_data, \\\n size_t count)\n#define CONTRIB_BINARY_OP_NAME_EXPR(name, expr) CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name);\nCONTRIB_BINARY_OPS()\n#undef CONTRIB_BINARY_OP_NAME_EXPR\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/nn/dropout.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(BitmaskDropout, kMSDomain, 1, kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", BuildKernelDefConstraints())\n .TypeConstraint(\"T1\", BuildKernelDefConstraints())\n .TypeConstraint(\"T2\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T3\", DataTypeImpl::GetTensorType())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .InputMemoryType(OrtMemTypeCPUInput, 2),\n onnxruntime::cuda::Dropout);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/nn/dropout.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(BitmaskDropout, kMSDomain, 1, kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", BuildKernelDefConstraints())\n .TypeConstraint(\"T1\", BuildKernelDefConstraints())\n .TypeConstraint(\"T2\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T3\", DataTypeImpl::GetTensorType())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .InputMemoryType(OrtMemTypeCPUInput, 2),\n onnxruntime::rocm::Dropout);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/math/matmul.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\n#define REGISTER_KERNEL_TYPED(op_name, T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n op_name, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n onnxruntime::cuda::MatMul);\n\n// TransposeMatMul is kept here for backward compatibility\nREGISTER_KERNEL_TYPED(TransposeMatMul, float)\nREGISTER_KERNEL_TYPED(TransposeMatMul, double)\nREGISTER_KERNEL_TYPED(TransposeMatMul, MLFloat16)\nREGISTER_KERNEL_TYPED(TransposeMatMul, BFloat16)\n\nREGISTER_KERNEL_TYPED(FusedMatMul, float)\nREGISTER_KERNEL_TYPED(FusedMatMul, double)\nREGISTER_KERNEL_TYPED(FusedMatMul, MLFloat16)\nREGISTER_KERNEL_TYPED(FusedMatMul, BFloat16)\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/math/matmul.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\n#define REGISTER_KERNEL_TYPED(op_name, T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n op_name, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n onnxruntime::rocm::MatMul);\n\n// TransposeMatMul is kept here for backward compatibility\nREGISTER_KERNEL_TYPED(TransposeMatMul, float)\nREGISTER_KERNEL_TYPED(TransposeMatMul, double)\nREGISTER_KERNEL_TYPED(TransposeMatMul, MLFloat16)\nREGISTER_KERNEL_TYPED(TransposeMatMul, BFloat16)\n\nREGISTER_KERNEL_TYPED(FusedMatMul, float)\nREGISTER_KERNEL_TYPED(FusedMatMul, double)\nREGISTER_KERNEL_TYPED(FusedMatMul, MLFloat16)\nREGISTER_KERNEL_TYPED(FusedMatMul, BFloat16)\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"contrib_ops/cuda/math/isfinite.h\"\n#include \"isfinite_impl.h\"\n\nusing namespace ONNX_NAMESPACE;\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace cuda {\n\n#define REGISTER_ISALLFINITE_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n IsAllFinite, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"V\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n IsAllFiniteOp);\n\ntemplate \nStatus IsAllFiniteOp::ComputeInternal(OpKernelContext* context) const {\n typedef typename ToCudaType::MappedType TSrcCuda;\n\n // Get Input tensor count.\n const auto total_tensor_count = context->InputCount();\n\n // Initialize the output to true. GPU kernel will set it\n // to false if any value in any tensor is non-finite.\n Tensor& output = *context->Output(0, {});\n auto* output_data = reinterpret_cast::MappedType*>(output.MutableData());\n CUDA_RETURN_IF_ERROR(cudaMemsetAsync(output_data, int(true), sizeof(bool), Stream(context)));\n\n std::vector> grouped_tensor_pointers(total_tensor_count);\n std::vector tensor_sizes(total_tensor_count);\n\n for (int i = 0; i < total_tensor_count; ++i) {\n const auto& input = context->Input(i);\n grouped_tensor_pointers[i] = {const_cast(input->Data())};\n tensor_sizes[i] = static_cast(input->Shape().Size());\n }\n\n typedef IsAllFiniteFunctor TFunctor;\n TFunctor functor;\n\n // Check if all values are finite and write true to output.\n // Otherwise, false will be written.\n launch_multi_tensor_functor<1, TFunctor>(\n Stream(context), 2048 * 32, tensor_sizes, grouped_tensor_pointers, functor, output_data, isinf_only_, isnan_only_);\n\n return Status::OK();\n}\n\nREGISTER_ISALLFINITE_KERNEL_TYPED(MLFloat16)\nREGISTER_ISALLFINITE_KERNEL_TYPED(float)\nREGISTER_ISALLFINITE_KERNEL_TYPED(double)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"contrib_ops/rocm/math/isfinite.h\"\n#include \"isfinite_impl.h\"\n\nusing namespace ONNX_NAMESPACE;\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace rocm {\n\n#define REGISTER_ISALLFINITE_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n IsAllFinite, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"V\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n IsAllFiniteOp);\n\ntemplate \nStatus IsAllFiniteOp::ComputeInternal(OpKernelContext* context) const {\n typedef typename ToHipType::MappedType TSrcCuda;\n\n // Get Input tensor count.\n const auto total_tensor_count = context->InputCount();\n\n // Initialize the output to true. 
GPU kernel will set it\n // to false if any value in any tensor is non-finite.\n Tensor& output = *context->Output(0, {});\n auto* output_data = reinterpret_cast::MappedType*>(output.MutableData());\n HIP_RETURN_IF_ERROR(hipMemsetAsync(output_data, int(true), sizeof(bool), Stream(context)));\n\n std::vector> grouped_tensor_pointers(total_tensor_count);\n std::vector tensor_sizes(total_tensor_count);\n\n for (int i = 0; i < total_tensor_count; ++i) {\n const auto& input = context->Input(i);\n grouped_tensor_pointers[i] = {const_cast(input->Data())};\n tensor_sizes[i] = static_cast(input->Shape().Size());\n }\n\n typedef IsAllFiniteFunctor TFunctor;\n TFunctor functor;\n\n // Check if all values are finite and write true to output.\n // Otherwise, false will be written.\n launch_multi_tensor_functor<1, TFunctor>(\n Stream(context), 2048 * 32, tensor_sizes, grouped_tensor_pointers, functor, output_data, isinf_only_, isnan_only_);\n\n return Status::OK();\n}\n\nREGISTER_ISALLFINITE_KERNEL_TYPED(MLFloat16)\nREGISTER_ISALLFINITE_KERNEL_TYPED(float)\nREGISTER_ISALLFINITE_KERNEL_TYPED(double)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"contrib_ops/cuda/math/isfinite.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__device__ __forceinline__ bool IsFiniteScalar(const T value) {\n return isfinite(value);\n}\n\ntemplate \n__device__ __forceinline__ bool IsInfScalar(const T value) {\n return isinf(value);\n}\n\ntemplate \n__device__ __forceinline__ bool IsNaNScalar(const T value) {\n return isnan(value);\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsFiniteScalar(const half value) {\n#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)\n return !__hisinf(value) && !__hisnan(value);\n#else\n return isfinite(float(value));\n#endif\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsInfScalar(const half value) {\n#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)\n return __hisinf(value);\n#else\n return isinf(float(value));\n#endif\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsNaNScalar(const half value) {\n#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)\n return __hisnan(value);\n#else\n return isnan(float(value));\n#endif\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsFiniteScalar(const BFloat16 value) {\n return isfinite(static_cast(value));\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsInfScalar(const BFloat16 value) {\n return isinf(static_cast(value));\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsNaNScalar(const BFloat16 value) {\n return isnan(static_cast(value));\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"contrib_ops/rocm/math/isfinite.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__device__ __forceinline__ bool IsFiniteScalar(const T value) {\n return isfinite(value);\n}\n\ntemplate \n__device__ __forceinline__ bool IsInfScalar(const T value) {\n return isinf(value);\n}\n\ntemplate \n__device__ __forceinline__ bool IsNaNScalar(const T value) {\n return isnan(value);\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsFiniteScalar(const half value) {\n#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)\n return !__hisinf(value) && !__hisnan(value);\n#else\n return isfinite(float(value));\n#endif\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsInfScalar(const half value) {\n#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)\n return __hisinf(value);\n#else\n return isinf(float(value));\n#endif\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsNaNScalar(const half value) {\n#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)\n return __hisnan(value);\n#else\n return isnan(float(value));\n#endif\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsFiniteScalar(const BFloat16 value) {\n return isfinite(static_cast(value));\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsInfScalar(const BFloat16 value) {\n return isinf(static_cast(value));\n}\n\ntemplate <>\n__device__ __forceinline__ bool IsNaNScalar(const BFloat16 value) {\n return isnan(static_cast(value));\n}\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/math/clip.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Clip_6 final : public onnxruntime::clip_internal::Clip_6Base, public CudaKernel {\n public:\n explicit Clip_6(const OpKernelInfo& info) : onnxruntime::clip_internal::Clip_6Base(info), CudaKernel{info} {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n// Since version 11. Min and Max are inputs\n// version 12 adds type support\nclass Clip final : public CudaKernel {\n public:\n explicit Clip(const OpKernelInfo& info) : CudaKernel{info} {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/math/clip.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Clip_6 final : public onnxruntime::clip_internal::Clip_6Base, public RocmKernel {\n public:\n explicit Clip_6(const OpKernelInfo& info) : onnxruntime::clip_internal::Clip_6Base(info), RocmKernel{info} {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n// Since version 11. Min and Max are inputs\n// version 12 adds type support\nclass Clip final : public RocmKernel {\n public:\n explicit Clip(const OpKernelInfo& info) : RocmKernel{info} {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass IsAllFiniteOp final : public CudaKernel {\n public:\n IsAllFiniteOp(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t isinf_only;\n info.GetAttrOrDefault(\"isinf_only\", &isinf_only, static_cast(0));\n isinf_only_ = (isinf_only != 0);\n\n int64_t isnan_only;\n info.GetAttrOrDefault(\"isnan_only\", &isnan_only, static_cast(0));\n isnan_only_ = (isnan_only != 0);\n\n ORT_ENFORCE(!(isinf_only_ && isnan_only_),\n \"Both attributes isinf_only and isnan_only cannot be set. Unset both to check for both conditions.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool isinf_only_, isnan_only_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass IsAllFiniteOp final : public RocmKernel {\n public:\n IsAllFiniteOp(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t isinf_only;\n info.GetAttrOrDefault(\"isinf_only\", &isinf_only, static_cast(0));\n isinf_only_ = (isinf_only != 0);\n\n int64_t isnan_only;\n info.GetAttrOrDefault(\"isnan_only\", &isnan_only, static_cast(0));\n isnan_only_ = (isnan_only != 0);\n\n ORT_ENFORCE(!(isinf_only_ && isnan_only_),\n \"Both attributes isinf_only and isnan_only cannot be set. Unset both to check for both conditions.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool isinf_only_, isnan_only_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \"isfinite_impl.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"contrib_ops/cuda/math/isfinite.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void IsAllFiniteMultiTensorImpl(ChunkGroup<1> chunks, bool* output) {\n const int block_idx = blockIdx.x;\n const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];\n const int tensor_size = chunks.tensor_sizes[tensor_idx];\n const TSrc* tensor_ptr = static_cast(chunks.tensor_ptrs[0][tensor_idx]);\n const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];\n // chunk_size is chunks.chunk_size if the loaded chunk is full. 
Otherwise (this\n // chunk is the last one in the source tensor), the actual size is determined\n // by the bound of the source tensor.\n const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;\n\n const TSrc* chunk_ptr = tensor_ptr + chunk_start_idx;\n bool result = true;\n#pragma unroll 4\n for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {\n if (isinf_only) {\n result &= !IsInfScalar(chunk_ptr[i]);\n } else if (isnan_only) {\n result &= !IsNaNScalar(chunk_ptr[i]);\n } else {\n result &= IsFiniteScalar(chunk_ptr[i]);\n }\n }\n\n if (!result) {\n *output = false;\n }\n}\n\ntemplate \nvoid IsAllFiniteFunctor::operator()(cudaStream_t stream,\n ChunkGroup<1> chunks,\n bool* output,\n const bool isinf_only,\n const bool isnan_only) {\n const int block_count = chunks.chunk_count;\n const int thread_count = ChunkGroup<1>::thread_count_per_block;\n if (isinf_only) {\n IsAllFiniteMultiTensorImpl<<>>(chunks, output);\n } else if (isnan_only) {\n IsAllFiniteMultiTensorImpl<<>>(chunks, output);\n } else {\n IsAllFiniteMultiTensorImpl<<>>(chunks, output);\n }\n}\n\n#define INSTANTIATE_ISALLFINITE_FUNCTOR(T) \\\n template void IsAllFiniteFunctor::operator()(cudaStream_t stream, \\\n ChunkGroup<1> chunks, \\\n bool* output, \\\n const bool isinf_only, \\\n const bool isnan_only);\n\nINSTANTIATE_ISALLFINITE_FUNCTOR(half)\nINSTANTIATE_ISALLFINITE_FUNCTOR(float)\nINSTANTIATE_ISALLFINITE_FUNCTOR(double)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \"isfinite_impl.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"contrib_ops/rocm/math/isfinite.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void IsAllFiniteMultiTensorImpl(ChunkGroup<1> chunks, bool* output) {\n const int block_idx = blockIdx.x;\n const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];\n const int tensor_size = chunks.tensor_sizes[tensor_idx];\n const TSrc* tensor_ptr = static_cast(chunks.tensor_ptrs[0][tensor_idx]);\n const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];\n // chunk_size is chunks.chunk_size if the loaded chunk is full. 
Otherwise (this\n // chunk is the last one in the source tensor), the actual size is determined\n // by the bound of the source tensor.\n const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;\n\n const TSrc* chunk_ptr = tensor_ptr + chunk_start_idx;\n bool result = true;\n#pragma unroll 4\n for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {\n if (isinf_only) {\n result &= !IsInfScalar(chunk_ptr[i]);\n } else if (isnan_only) {\n result &= !IsNaNScalar(chunk_ptr[i]);\n } else {\n result &= IsFiniteScalar(chunk_ptr[i]);\n }\n }\n\n if (!result) {\n *output = false;\n }\n}\n\ntemplate \nvoid IsAllFiniteFunctor::operator()(hipStream_t stream,\n ChunkGroup<1> chunks,\n bool* output,\n const bool isinf_only,\n const bool isnan_only) {\n const int block_count = chunks.chunk_count;\n const int thread_count = ChunkGroup<1>::thread_count_per_block;\n if (isinf_only) {\n IsAllFiniteMultiTensorImpl<<>>(chunks, output);\n } else if (isnan_only) {\n IsAllFiniteMultiTensorImpl<<>>(chunks, output);\n } else {\n IsAllFiniteMultiTensorImpl<<>>(chunks, output);\n }\n}\n\n#define INSTANTIATE_ISALLFINITE_FUNCTOR(T) \\\n template void IsAllFiniteFunctor::operator()(hipStream_t stream, \\\n ChunkGroup<1> chunks, \\\n bool* output, \\\n const bool isinf_only, \\\n const bool isnan_only);\n\nINSTANTIATE_ISALLFINITE_FUNCTOR(half)\nINSTANTIATE_ISALLFINITE_FUNCTOR(float)\nINSTANTIATE_ISALLFINITE_FUNCTOR(double)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nstruct IsAllFiniteFunctor {\n void operator()(cudaStream_t stream, ChunkGroup<1> chunks, bool* output, const bool isinf_only, const bool isnan_only);\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nstruct IsAllFiniteFunctor {\n void operator()(hipStream_t stream, ChunkGroup<1> chunks, bool* output, const bool isinf_only, const bool isnan_only);\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_OPS\n// Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once 1). compute optimizer is enabled for inference or\n// 2). 
this is needed by inference for other purpose.\n\n#include \"contrib_ops/cuda/tensor/shrunken_gather.h\"\n#include \"contrib_ops/cpu/tensor/shrunken_gather.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\nONNX_OPERATOR_KERNEL_EX(\n ShrunkenGather,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n ShrunkenGather);\n\nStatus ShrunkenGather::ComputeInternal(OpKernelContext* context) const {\n Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));\n ShrunkenGatherCommon::CheckInput(p.input_tensor, p.indices_tensor, p.axis);\n return onnxruntime::cuda::Gather::ComputeInternal(context);\n}\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n#endif\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_OPS\n// Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once 1). compute optimizer is enabled for inference or\n// 2). this is needed by inference for other purpose.\n\n#include \"contrib_ops/rocm/tensor/shrunken_gather.h\"\n#include \"contrib_ops/cpu/tensor/shrunken_gather.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\nONNX_OPERATOR_KERNEL_EX(\n ShrunkenGather,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n ShrunkenGather);\n\nStatus ShrunkenGather::ComputeInternal(OpKernelContext* context) const {\n Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));\n ShrunkenGatherCommon::CheckInput(p.input_tensor, p.indices_tensor, p.axis);\n return onnxruntime::rocm::Gather::ComputeInternal(context);\n}\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n\n#endif\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_OPS\n// Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once 1). compute optimizer is enabled for inference or\n// 2). this is needed by inference for other purpose.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/tensor/gather.h\"\n#include \"contrib_ops/cpu/tensor/shrunken_gather.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nclass ShrunkenGather final : public onnxruntime::cuda::Gather, public ShrunkenGatherCommon {\n public:\n ShrunkenGather(const OpKernelInfo& info) : onnxruntime::cuda::Gather(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n#endif\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_OPS\n// Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once 1). compute optimizer is enabled for inference or\n// 2). 
this is needed by inference for other purpose.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/tensor/gather.h\"\n#include \"contrib_ops/cpu/tensor/shrunken_gather.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nclass ShrunkenGather final : public onnxruntime::rocm::Gather, public ShrunkenGatherCommon {\n public:\n ShrunkenGather(const OpKernelInfo& info) : onnxruntime::rocm::Gather(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n\n#endif\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/tensor/trilu.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n Trilu,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .MayInplace(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n onnxruntime::cuda::Trilu);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/tensor/trilu.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n Trilu,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .MayInplace(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n onnxruntime::rocm::Trilu);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"contrib_ops/cpu/transformers/beam_search.h\"\n\nnamespace onnxruntime {\nclass SessionState;\n\nnamespace contrib {\nnamespace cuda {\n\nclass BeamSearch final : public onnxruntime::contrib::transformers::BeamSearch {\n public:\n BeamSearch(const OpKernelInfo& info);\n\n Status Compute(OpKernelContext* context) const override;\n\n private:\n Status ComputeInternal(OpKernelContext* context) const;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"contrib_ops/cpu/transformers/beam_search.h\"\n\nnamespace onnxruntime {\nclass SessionState;\n\nnamespace contrib {\nnamespace rocm {\n\nclass BeamSearch final : public onnxruntime::contrib::transformers::BeamSearch {\n public:\n BeamSearch(const OpKernelInfo& info);\n\n Status Compute(OpKernelContext* context) const override;\n\n private:\n Status ComputeInternal(OpKernelContext* context) const;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nvoid BeamSearchTopK(\n const T* input,\n int32_t batch_size,\n int32_t num_beams,\n int32_t vocab_size,\n int32_t k,\n T* tmp_values_1st_stage,\n int32_t* tmp_indices_1st_stage,\n T* tmp_values_2st_stage,\n int32_t* tmp_indices_2st_stage,\n T* output_values,\n int32_t* output_tokens,\n int32_t* output_indices,\n cudaStream_t stream);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nvoid BeamSearchTopK(\n const T* input,\n int32_t batch_size,\n int32_t num_beams,\n int32_t vocab_size,\n int32_t k,\n T* tmp_values_1st_stage,\n int32_t* tmp_indices_1st_stage,\n T* tmp_values_2st_stage,\n int32_t* tmp_indices_2st_stage,\n T* output_values,\n int32_t* output_tokens,\n int32_t* output_indices,\n hipStream_t stream);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/tensorprotoutils.h\"\n#include \"core/framework/ort_value.h\"\n#include \"contrib_ops/cpu/utils/console_dumper.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\nnamespace transformers {\n\nclass CudaTensorConsoleDumper : public onnxruntime::contrib::transformers::IConsoleDumper {\n public:\n CudaTensorConsoleDumper() = default;\n virtual ~CudaTensorConsoleDumper() {}\n void Print(const char* name, const float* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const size_t* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const half* tensor, int dim0, int dim1) const;\n void Print(const char* name, const int64_t* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const int32_t* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2, int dim3) const;\n void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2, int dim3) const;\n void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2) const;\n void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2, int dim3) const;\n void Print(const char* name, const int64_t* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const int32_t* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const Tensor& value) const override;\n void Print(const char* name, const OrtValue& value) const override;\n void Print(const char* name, int index, bool end_line) const override;\n void Print(const char* name, const std::string& value, bool end_line) const override;\n};\n\n} // namespace transformers\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) 
Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/tensorprotoutils.h\"\n#include \"core/framework/ort_value.h\"\n#include \"contrib_ops/cpu/utils/console_dumper.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\nnamespace transformers {\n\nclass HipTensorConsoleDumper : public onnxruntime::contrib::transformers::IConsoleDumper {\n public:\n HipTensorConsoleDumper() = default;\n virtual ~HipTensorConsoleDumper() {}\n void Print(const char* name, const float* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const size_t* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const half* tensor, int dim0, int dim1) const;\n void Print(const char* name, const int64_t* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const int32_t* tensor, int dim0, int dim1) const override;\n void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2, int dim3) const;\n void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2, int dim3) const;\n void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2) const;\n void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2, int dim3) const;\n void Print(const char* name, const int64_t* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const int32_t* tensor, int dim0, int dim1, int dim2) const override;\n void Print(const char* name, const Tensor& value) const override;\n void Print(const char* name, const OrtValue& value) const override;\n void Print(const char* name, int index, bool end_line) const override;\n void Print(const char* name, const std::string& value, bool end_line) const override;\n};\n\n} // namespace transformers\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#pragma once\n#include \n#include \n#include \nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\nvoid LaunchInitKernel(\n float* beam_scores, int batch_size, int num_beams, cudaStream_t stream);\ntemplate \nvoid LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, cudaStream_t stream);\ntemplate \nvoid LaunchLogitsProcessKernel(\n T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream);\nvoid LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, cudaStream_t stream);\nvoid LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, cudaStream_t stream);\ntemplate \nvoid GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool 
is_descending, size_t& temp_storage_bytes);\nvoid LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, cudaStream_t stream);\ntemplate \nvoid LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending);\ntemplate \nvoid LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending);\nvoid TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* d_presence_mask, cudaStream_t stream);\nvoid UpdateDecoderMaskedMultiHeadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, cudaStream_t stream);\ntemplate \nvoid KeyCacheExpansionKernelLauncher(const T* key_cache, T* key_cache_expanded, int batch_size, int beam_width, int num_heads, int sequence_length, int max_seq_length, int head_size, cudaStream_t stream);\ntemplate \nvoid BufferExpansionKernelLauncher(const T* input, T* output, int batch_size, int beam_width, int chunk_size, cudaStream_t stream);\n} \n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \n#include \n#include \nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\nvoid LaunchInitKernel(\n float* beam_scores, int batch_size, int num_beams, hipStream_t stream);\ntemplate \nvoid LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, hipStream_t stream);\ntemplate \nvoid LaunchLogitsProcessKernel(\n T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream);\nvoid LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, hipStream_t stream);\nvoid LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, hipStream_t stream);\ntemplate \nvoid GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes);\nvoid LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, hipStream_t stream);\ntemplate \nvoid LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending);\ntemplate \nvoid LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending);\nvoid TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* 
d_presence_mask, hipStream_t stream);\nvoid UpdateDecoderMaskedMultiHeadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, hipStream_t stream);\ntemplate \nvoid KeyCacheExpansionKernelLauncher(const T* key_cache, T* key_cache_expanded, int batch_size, int beam_width, int num_heads, int sequence_length, int max_seq_length, int head_size, hipStream_t stream);\ntemplate \nvoid BufferExpansionKernelLauncher(const T* input, T* output, int batch_size, int beam_width, int chunk_size, hipStream_t stream);\n} \n} \n} ###" }, { "cuda": "\n\n\n#include \"core/providers/cuda/math/clip_impl.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \n__global__ void _Clip(const T* input, T* output, const T* min, const T* max, T min_default, T max_default, size_t N) {\n auto min_val = (min) ? *min : min_default; \n auto max_val = (max) ? *max : max_default; \n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n output[id] = (input[id] < min_val) ? min_val : ((input[id] > max_val) ? max_val : input[id]);\n}\ntemplate \nvoid ClipImpl(cudaStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count) {\n typedef typename ToCudaType::MappedType CudaT;\n int blocksPerGrid = (int)(ceil(static_cast(count) / GridDim::maxThreadsPerBlock));\n union ConstAliasUnion {\n const T *t;\n const CudaT *cudaT;\n ConstAliasUnion(const T* _t) { t = _t;}\n };\n union AliasUnion {\n T *t;\n CudaT *cudaT;\n AliasUnion(T* _t) { t = _t;}\n };\n _Clip<<>>(((union ConstAliasUnion)input_data).cudaT, ((union AliasUnion)output_data).cudaT, ((union ConstAliasUnion)min).cudaT, ((union ConstAliasUnion)max).cudaT, *((union AliasUnion)&min_default).cudaT, *((union AliasUnion)&max_default).cudaT, count);\n}\ntemplate void ClipImpl(cudaStream_t stream, const float* input_data, float* output_data, const float* min, const float* max, float min_default, float max_default, size_t count);\ntemplate void ClipImpl(cudaStream_t stream, const double* input_data, double* output_data, const double* min, const double* max, double min_default, double max_default, size_t count);\ntemplate void ClipImpl(cudaStream_t stream, const MLFloat16* input_data, MLFloat16* output_data, const MLFloat16* min, const MLFloat16* max, MLFloat16 min_default, MLFloat16 max_default, size_t count);\ntemplate void ClipImpl(cudaStream_t stream, const int8_t* input_data, int8_t* output_data, const int8_t* min, const int8_t* max, int8_t min_default, int8_t max_default, size_t count);\ntemplate void ClipImpl(cudaStream_t stream, const uint8_t* input_data, uint8_t* output_data, const uint8_t* min, const uint8_t* max, uint8_t min_default, uint8_t max_default, size_t count);\ntemplate void ClipImpl(cudaStream_t stream, const int64_t* input_data, int64_t* output_data, const int64_t* min, const int64_t* max, int64_t min_default, int64_t max_default, size_t count);\ntemplate void ClipImpl(cudaStream_t stream, const uint64_t* input_data, uint64_t* output_data, const uint64_t* min, const uint64_t* max, uint64_t min_default, uint64_t max_default, size_t count);\n} \n} \n\n###", "hip": " #include \"hip/hip_runtime.h\"\n\n\n#include \"core/providers/rocm/math/clip_impl.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \n__global__ void _Clip(const T* input, T* output, const T* 
min, const T* max, T min_default, T max_default, size_t N) {\n auto min_val = (min) ? *min : min_default; \n auto max_val = (max) ? *max : max_default; \n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n output[id] = (input[id] < min_val) ? min_val : ((input[id] > max_val) ? max_val : input[id]);\n}\ntemplate \nvoid ClipImpl(hipStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count) {\n typedef typename ToHipType::MappedType HipT;\n int blocksPerGrid = (int)(ceil(static_cast(count) / GridDim::maxThreadsPerBlock));\n union ConstAliasUnion {\n const T *t;\n const HipT *rocmT;\n ConstAliasUnion(const T* _t) { t = _t;}\n };\n union AliasUnion {\n T *t;\n HipT *rocmT;\n AliasUnion(T* _t) { t = _t;}\n };\n _Clip<<>>(((union ConstAliasUnion)input_data).rocmT, ((union AliasUnion)output_data).rocmT, ((union ConstAliasUnion)min).rocmT, ((union ConstAliasUnion)max).rocmT, *((union AliasUnion)&min_default).rocmT, *((union AliasUnion)&max_default).rocmT, count);\n}\ntemplate void ClipImpl(hipStream_t stream, const float* input_data, float* output_data, const float* min, const float* max, float min_default, float max_default, size_t count);\ntemplate void ClipImpl(hipStream_t stream, const double* input_data, double* output_data, const double* min, const double* max, double min_default, double max_default, size_t count);\ntemplate void ClipImpl(hipStream_t stream, const MLFloat16* input_data, MLFloat16* output_data, const MLFloat16* min, const MLFloat16* max, MLFloat16 min_default, MLFloat16 max_default, size_t count);\ntemplate void ClipImpl(hipStream_t stream, const int8_t* input_data, int8_t* output_data, const int8_t* min, const int8_t* max, int8_t min_default, int8_t max_default, size_t count);\ntemplate void ClipImpl(hipStream_t stream, const uint8_t* input_data, uint8_t* output_data, const uint8_t* min, const uint8_t* max, uint8_t min_default, uint8_t max_default, size_t count);\ntemplate void ClipImpl(hipStream_t stream, const int64_t* input_data, int64_t* output_data, const int64_t* min, const int64_t* max, int64_t min_default, int64_t max_default, size_t count);\ntemplate void ClipImpl(hipStream_t stream, const uint64_t* input_data, uint64_t* output_data, const uint64_t* min, const uint64_t* max, uint64_t min_default, uint64_t max_default, size_t count);\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\ntemplate \nvoid GreedySearchTopOne(\n const T* input,\n int32_t batch_size,\n int32_t vocab_size,\n T* tmp_values,\n int32_t* tmp_tokens,\n T* output_values,\n int32_t* output_tokens,\n cudaStream_t stream);\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\ntemplate \nvoid GreedySearchTopOne(\n const T* input,\n int32_t batch_size,\n int32_t vocab_size,\n T* tmp_values,\n int32_t* tmp_tokens,\n T* output_values,\n int32_t* output_tokens,\n hipStream_t stream);\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/cuda_execution_provider.h\"\n#include \"contrib_ops/cuda/transformers/sampling.h\"\n#include \"contrib_ops/cuda/transformers/generation_device_helper.h\"\n#include \"contrib_ops/cuda/transformers/dump_cuda_tensor.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n Sampling,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0) // 'input_ids' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 1) // 'max_length' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 2) // 'min_length' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 3) // 'repetition_penalty' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 6) // 'custom_attention_mask' needs to be on CPU\n .OutputMemoryType(OrtMemTypeCPUOutput, 0) // 'sequences' output on CPU\n .OutputMemoryType(OrtMemTypeCPUOutput, 1) // 'logits_to_debug' output on CPU\n .TypeConstraint(\"T\", {DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n Sampling);\n\ntransformers::CudaTensorConsoleDumper g_cuda_dumper_sampling;\n\nSampling::Sampling(const OpKernelInfo& info)\n : onnxruntime::contrib::transformers::Sampling(info) {\n SetDeviceHelpers(GenerationCudaDeviceHelper::ReorderPastState,\n GenerationCudaDeviceHelper::AddToFeeds,\n GenerationCudaDeviceHelper::TopK,\n GenerationCudaDeviceHelper::DeviceCopy,\n GenerationCudaDeviceHelper::GreedySearchProcessLogits,\n GenerationCudaDeviceHelper::GreedySearchProcessLogits,\n GenerationCudaDeviceHelper::InitGreedyState,\n GenerationCudaDeviceHelper::InitGreedyState);\n\n SetDeviceHelpers_Gpt(GenerationCudaDeviceHelper::UpdateGptFeeds,\n GenerationCudaDeviceHelper::UpdateGptFeeds);\n\n SetConsoleDumper(&g_cuda_dumper_sampling);\n\n gpu_device_prop_ = &reinterpret_cast(info.GetExecutionProvider())->GetDeviceProp();\n\n gpu_device_arch_ = static_cast(gpu_device_prop_)->major * 100 +\n static_cast(gpu_device_prop_)->minor * 10;\n}\n\nStatus Sampling::ComputeInternal(OpKernelContext* context) const {\n return onnxruntime::contrib::transformers::Sampling::Compute(context);\n}\n\nStatus Sampling::Compute(OpKernelContext* context) const {\n auto s = ComputeInternal(context);\n\n if (s.IsOK()) {\n auto err = cudaGetLastError();\n if (err != cudaSuccess) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"CUDA error \", cudaGetErrorName(err), \":\", cudaGetErrorString(err));\n }\n }\n\n return s;\n}\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/rocm_execution_provider.h\"\n#include \"contrib_ops/rocm/transformers/sampling.h\"\n#include \"contrib_ops/rocm/transformers/generation_device_helper.h\"\n#include \"contrib_ops/rocm/transformers/dump_rocm_tensor.h\"\n\nnamespace onnxruntime {\nnamespace contrib {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n Sampling,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0) // 'input_ids' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 1) // 'max_length' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 2) // 'min_length' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 3) // 'repetition_penalty' needs to be on CPU\n .InputMemoryType(OrtMemTypeCPUInput, 6) // 'custom_attention_mask' needs to be on CPU\n .OutputMemoryType(OrtMemTypeCPUOutput, 0) // 'sequences' output on CPU\n .OutputMemoryType(OrtMemTypeCPUOutput, 1) // 'logits_to_debug' output on CPU\n .TypeConstraint(\"T\", {DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n Sampling);\n\ntransformers::HipTensorConsoleDumper g_rocm_dumper_sampling;\n\nSampling::Sampling(const OpKernelInfo& info)\n : onnxruntime::contrib::transformers::Sampling(info) {\n SetDeviceHelpers(GenerationCudaDeviceHelper::ReorderPastState,\n GenerationCudaDeviceHelper::AddToFeeds,\n GenerationCudaDeviceHelper::TopK,\n GenerationCudaDeviceHelper::DeviceCopy,\n GenerationCudaDeviceHelper::GreedySearchProcessLogits,\n GenerationCudaDeviceHelper::GreedySearchProcessLogits,\n GenerationCudaDeviceHelper::InitGreedyState,\n GenerationCudaDeviceHelper::InitGreedyState);\n\n SetDeviceHelpers_Gpt(GenerationCudaDeviceHelper::UpdateGptFeeds,\n GenerationCudaDeviceHelper::UpdateGptFeeds);\n\n SetConsoleDumper(&g_rocm_dumper_sampling);\n\n gpu_device_prop_ = &reinterpret_cast(info.GetExecutionProvider())->GetDeviceProp();\n\n gpu_device_arch_ = static_cast(gpu_device_prop_)->major * 100 +\n static_cast(gpu_device_prop_)->minor * 10;\n}\n\nStatus Sampling::ComputeInternal(OpKernelContext* context) const {\n return onnxruntime::contrib::transformers::Sampling::Compute(context);\n}\n\nStatus Sampling::Compute(OpKernelContext* context) const {\n auto s = ComputeInternal(context);\n\n if (s.IsOK()) {\n auto err = hipGetLastError();\n if (err != hipSuccess) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"ROCM error \", hipGetErrorName(err), \":\", hipGetErrorString(err));\n }\n }\n\n return s;\n}\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"contrib_ops/cpu/transformers/sampling.h\"\n\nnamespace onnxruntime {\nclass SessionState;\n\nnamespace contrib {\nnamespace cuda {\n\nclass Sampling final : public onnxruntime::contrib::transformers::Sampling {\n public:\n Sampling(const OpKernelInfo& info);\n\n Status Compute(OpKernelContext* context) const override;\n\n private:\n Status ComputeInternal(OpKernelContext* context) const;\n};\n\n} // namespace cuda\n} // namespace contrib\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"contrib_ops/cpu/transformers/sampling.h\"\n\nnamespace onnxruntime {\nclass SessionState;\n\nnamespace contrib {\nnamespace rocm {\n\nclass Sampling final : public onnxruntime::contrib::transformers::Sampling {\n public:\n Sampling(const OpKernelInfo& info);\n\n Status Compute(OpKernelContext* context) const override;\n\n private:\n Status ComputeInternal(OpKernelContext* context) const;\n};\n\n} // namespace rocm\n} // namespace contrib\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/activation/activations_grad.h\"\n#include \"core/framework/op_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#define REGISTER_ACTIVATION_GRAD_KERNEL(x, ver, domain, T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n x, \\\n domain, \\\n ver, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .MayInplace(0, 0), \\\n x);\n\n#define BINARY_ELEMENTWISE_COMPUTE(x, T) \\\n template <> \\\n Status x::ComputeInternal(OpKernelContext* context) const { \\\n BinaryElementwisePreparation prepare; \\\n ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); \\\n Ctx##x func_ctx = MakeFuncCtx(); \\\n Impl_##x::MappedType>( \\\n Stream(context), \\\n reinterpret_cast::MappedType*>(prepare.lhs_tensor->template Data()), \\\n reinterpret_cast::MappedType*>(prepare.rhs_tensor->template Data()), \\\n reinterpret_cast::MappedType*>(prepare.output_tensor->template MutableData()), \\\n &func_ctx, prepare.output_tensor->Shape().Size()); \\\n return Status::OK(); \\\n }\n\n#define ACTIVATION_GRAD_OP_TYPED(name, ver, domain, T) \\\n REGISTER_ACTIVATION_GRAD_KERNEL(name, ver, domain, T) \\\n BINARY_ELEMENTWISE_COMPUTE(name, T)\n\n#define ACTIVATION_GRAD_OP_HFD(name, ver, domain) \\\n ACTIVATION_GRAD_OP_TYPED(name, ver, domain, MLFloat16) \\\n ACTIVATION_GRAD_OP_TYPED(name, ver, domain, float) \\\n ACTIVATION_GRAD_OP_TYPED(name, ver, domain, double)\n\nACTIVATION_GRAD_OP_HFD(GeluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(FastGeluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(ReluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(SigmoidGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(QuickGeluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(TanhGrad, 1, kMSDomain);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/activation/activations_grad.h\"\n#include \"core/framework/op_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#define REGISTER_ACTIVATION_GRAD_KERNEL(x, ver, domain, T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n x, \\\n domain, \\\n ver, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .MayInplace(0, 0), \\\n x);\n\n#define BINARY_ELEMENTWISE_COMPUTE(x, T) \\\n template <> \\\n Status x::ComputeInternal(OpKernelContext* context) const { \\\n BinaryElementwisePreparation prepare; \\\n ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); \\\n Ctx##x func_ctx = MakeFuncCtx(); \\\n Impl_##x::MappedType>( \\\n Stream(context), \\\n reinterpret_cast::MappedType*>(prepare.lhs_tensor->template Data()), \\\n reinterpret_cast::MappedType*>(prepare.rhs_tensor->template Data()), \\\n reinterpret_cast::MappedType*>(prepare.output_tensor->template MutableData()), \\\n &func_ctx, prepare.output_tensor->Shape().Size()); \\\n return Status::OK(); \\\n }\n\n#define ACTIVATION_GRAD_OP_TYPED(name, ver, domain, T) \\\n REGISTER_ACTIVATION_GRAD_KERNEL(name, ver, domain, T) \\\n BINARY_ELEMENTWISE_COMPUTE(name, T)\n\n#define ACTIVATION_GRAD_OP_HFD(name, ver, domain) \\\n ACTIVATION_GRAD_OP_TYPED(name, ver, domain, MLFloat16) \\\n ACTIVATION_GRAD_OP_TYPED(name, ver, domain, float) \\\n ACTIVATION_GRAD_OP_TYPED(name, ver, domain, double)\n\nACTIVATION_GRAD_OP_HFD(GeluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(FastGeluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(ReluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(SigmoidGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(QuickGeluGrad, 1, kMSDomain);\nACTIVATION_GRAD_OP_HFD(TanhGrad, 1, kMSDomain);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/math/binary_elementwise_ops.h\"\n#include \"core/providers/cuda/activation/activations.h\"\n#include \"orttraining/training_ops/cuda/activation/activations_grad_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass GeluGrad final : public BinaryElementwise {\n public:\n GeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass FastGeluGrad final : public BinaryElementwise {\n public:\n FastGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass ReluGrad final : public BinaryElementwise {\n public:\n ReluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass SigmoidGrad final : public BinaryElementwise {\n public:\n SigmoidGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass QuickGeluGrad final : public BinaryElementwise {\n public:\n QuickGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {\n alpha_ = info.GetAttrOrDefault(\"alpha\", 1.702f);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA()\n float alpha_;\n};\n\ntemplate \nclass TanhGrad final : public BinaryElementwise {\n public:\n TanhGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/math/binary_elementwise_ops.h\"\n#include \"core/providers/rocm/activation/activations.h\"\n#include \"orttraining/training_ops/rocm/activation/activations_grad_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass GeluGrad final : public BinaryElementwise {\n public:\n GeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass FastGeluGrad final : public BinaryElementwise {\n public:\n FastGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass ReluGrad final : public BinaryElementwise {\n public:\n ReluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass SigmoidGrad final : public BinaryElementwise {\n public:\n SigmoidGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n\ntemplate \nclass QuickGeluGrad final : public BinaryElementwise {\n public:\n QuickGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {\n alpha_ = info.GetAttrOrDefault(\"alpha\", 1.702f);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_ALPHA()\n float alpha_;\n};\n\ntemplate \nclass TanhGrad final : public BinaryElementwise {\n public:\n TanhGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n MAKE_FUNC_CTX_NULL()\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/activation/activations_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntypedef onnxruntime::cuda::CtxNull CtxGeluGrad;\ntypedef onnxruntime::cuda::CtxNull CtxFastGeluGrad;\ntypedef onnxruntime::cuda::CtxNull CtxReluGrad;\ntypedef onnxruntime::cuda::CtxNull CtxSigmoidGrad;\ntypedef onnxruntime::cuda::CtxAlpha CtxQuickGeluGrad;\ntypedef onnxruntime::cuda::CtxNull CtxTanhGrad;\n\n#define ACTIVATION_GRAD_OPS() \\\n ACTIVATION_GRAD_OP_NAME(GeluGrad) \\\n ACTIVATION_GRAD_OP_NAME(FastGeluGrad) \\\n ACTIVATION_GRAD_OP_NAME(ReluGrad) \\\n ACTIVATION_GRAD_OP_NAME(SigmoidGrad) \\\n ACTIVATION_GRAD_OP_NAME(QuickGeluGrad) \\\n ACTIVATION_GRAD_OP_NAME(TanhGrad)\n\n#define BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \\\n template \\\n void Impl_##name(cudaStream_t stream, \\\n const T* lhs_data, \\\n const T* rhs_data, \\\n T* output_data, \\\n const Ctx##name* func_ctx, \\\n size_t count)\n\n#define ACTIVATION_GRAD_OP_NAME(name) BINARY_ELEMENTWISE_IMPL_DECLARATION(name);\nACTIVATION_GRAD_OPS()\n#undef ACTIVATION_GRAD_OP_NAME\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/activation/activations_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntypedef onnxruntime::rocm::CtxNull CtxGeluGrad;\ntypedef onnxruntime::rocm::CtxNull CtxFastGeluGrad;\ntypedef onnxruntime::rocm::CtxNull CtxReluGrad;\ntypedef onnxruntime::rocm::CtxNull CtxSigmoidGrad;\ntypedef onnxruntime::rocm::CtxAlpha CtxQuickGeluGrad;\ntypedef onnxruntime::rocm::CtxNull CtxTanhGrad;\n\n#define ACTIVATION_GRAD_OPS() \\\n ACTIVATION_GRAD_OP_NAME(GeluGrad) \\\n ACTIVATION_GRAD_OP_NAME(FastGeluGrad) \\\n ACTIVATION_GRAD_OP_NAME(ReluGrad) \\\n ACTIVATION_GRAD_OP_NAME(SigmoidGrad) \\\n ACTIVATION_GRAD_OP_NAME(QuickGeluGrad) \\\n ACTIVATION_GRAD_OP_NAME(TanhGrad)\n\n#define BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \\\n template \\\n void Impl_##name(hipStream_t stream, \\\n const T* lhs_data, \\\n const T* rhs_data, \\\n T* output_data, \\\n const Ctx##name* func_ctx, \\\n size_t count)\n\n#define ACTIVATION_GRAD_OP_NAME(name) BINARY_ELEMENTWISE_IMPL_DECLARATION(name);\nACTIVATION_GRAD_OPS()\n#undef ACTIVATION_GRAD_OP_NAME\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"orttraining/training_ops/cuda/activation/bias_gelu_grad.h\"\n#include \"core/common/common.h\"\n#include \"orttraining/training_ops/cpu/activation/gelu_computation_mode.h\"\n#include \"orttraining/training_ops/cuda/activation/bias_gelu_grad_impl.h\"\nnamespace onnxruntime {\nnamespace cuda {\nONNX_OPERATOR_KERNEL_EX(\n BiasGeluGrad_dX, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", BuildKernelDefConstraints())\n .MayInplace(0, 0), BiasGeluGrad_dX);\nONNX_OPERATOR_KERNEL_EX(\n BiasFastGeluGrad_dX, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", BuildKernelDefConstraints())\n .MayInplace(0, 0), BiasGeluGrad_dX);\ntemplate \ntemplate \nvoid BiasGeluGrad_dX::KernelLaunchDispatcher::operator()(\n cudaStream_t stream, int64_t input_size, int64_t bias_size, const Tensor& dY, const Tensor& X, const Tensor& B, Tensor& dX) const {\n using CudaT = typename ToCudaType::MappedType;\n LaunchBiasGeluGradDxKernel(\n stream, input_size, bias_size, reinterpret_cast(dY.template Data()), reinterpret_cast(X.template Data()), reinterpret_cast(B.template Data()), reinterpret_cast(dX.template MutableData()));\n}\ntemplate \nStatus BiasGeluGrad_dX::ComputeInternal(OpKernelContext* context) const {\n const auto* dY = context->Input(0);\n ORT_ENFORCE(dY);\n const auto* X = context->Input(1);\n ORT_ENFORCE(X);\n const auto* B = context->Input(2);\n ORT_ENFORCE(B);\n const auto& input_shape = X->Shape();\n ORT_ENFORCE(input_shape == dY->Shape(), \"dY and X must have the same shape.\");\n const auto& bias_shape = B->Shape();\n ORT_ENFORCE(\n input_shape.NumDimensions() >= 1 && bias_shape.NumDimensions() == 1 &&\n input_shape.GetDims().back() == bias_shape.GetDims().back(), \"B must be 1-dimensional and match the last dimension of X.\");\n auto* dX = context->Output(0, input_shape);\n ORT_ENFORCE(dX);\n const auto input_size = input_shape.Size(), bias_size = bias_shape.Size();\n utils::MLTypeCallDispatcher dispatcher{X->GetElementType()};\n dispatcher.Invoke(Stream(context), input_size, bias_size, *dY, *X, *B, *dX);\n return Status::OK();\n}\n} \n} \n\n###", "hip": " \n\n#include \"orttraining/training_ops/rocm/activation/bias_gelu_grad.h\"\n#include \"core/common/common.h\"\n#include 
\"orttraining/training_ops/cpu/activation/gelu_computation_mode.h\"\n#include \"orttraining/training_ops/rocm/activation/bias_gelu_grad_impl.h\"\nnamespace onnxruntime {\nnamespace rocm {\nONNX_OPERATOR_KERNEL_EX(\n BiasGeluGrad_dX, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", BuildKernelDefConstraints())\n .MayInplace(0, 0), BiasGeluGrad_dX);\nONNX_OPERATOR_KERNEL_EX(\n BiasFastGeluGrad_dX, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", BuildKernelDefConstraints())\n .MayInplace(0, 0), BiasGeluGrad_dX);\ntemplate \ntemplate \nvoid BiasGeluGrad_dX::KernelLaunchDispatcher::operator()(\n hipStream_t stream, int64_t input_size, int64_t bias_size, const Tensor& dY, const Tensor& X, const Tensor& B, Tensor& dX) const {\n using HipT = typename ToHipType::MappedType;\n LaunchBiasGeluGradDxKernel(\n stream, input_size, bias_size, reinterpret_cast(dY.template Data()), reinterpret_cast(X.template Data()), reinterpret_cast(B.template Data()), reinterpret_cast(dX.template MutableData()));\n}\ntemplate \nStatus BiasGeluGrad_dX::ComputeInternal(OpKernelContext* context) const {\n const auto* dY = context->Input(0);\n ORT_ENFORCE(dY);\n const auto* X = context->Input(1);\n ORT_ENFORCE(X);\n const auto* B = context->Input(2);\n ORT_ENFORCE(B);\n const auto& input_shape = X->Shape();\n ORT_ENFORCE(input_shape == dY->Shape(), \"dY and X must have the same shape.\");\n const auto& bias_shape = B->Shape();\n ORT_ENFORCE(\n input_shape.NumDimensions() >= 1 && bias_shape.NumDimensions() == 1 &&\n input_shape.GetDims().back() == bias_shape.GetDims().back(), \"B must be 1-dimensional and match the last dimension of X.\");\n auto* dX = context->Output(0, input_shape);\n ORT_ENFORCE(dX);\n const auto input_size = input_shape.Size(), bias_size = bias_shape.Size();\n utils::MLTypeCallDispatcher dispatcher{X->GetElementType()};\n dispatcher.Invoke(Stream(context), input_size, bias_size, *dY, *X, *B, *dX);\n return Status::OK();\n}\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass BiasGeluGrad_dX : public CudaKernel {\n public:\n BiasGeluGrad_dX(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct KernelLaunchDispatcher {\n void operator()(\n cudaStream_t stream,\n int64_t input_size, int64_t bias_size,\n const Tensor& dY, const Tensor& X, const Tensor& B,\n Tensor& dX) const;\n };\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass BiasGeluGrad_dX : public RocmKernel {\n public:\n BiasGeluGrad_dX(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct KernelLaunchDispatcher {\n void operator()(\n hipStream_t stream,\n int64_t input_size, int64_t bias_size,\n const Tensor& dY, const Tensor& X, const Tensor& B,\n Tensor& dX) const;\n };\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\n// assumptions:\n// - dY, X, dX have input_size elements\n// - B has bias_size elements\n// - input_size % bias_size == 0\ntemplate \nvoid LaunchBiasGeluGradDxKernel(\n cudaStream_t stream,\n int64_t input_size, int64_t bias_size,\n const T* dY, const T* X, const T* B, T* dX);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\n// assumptions:\n// - dY, X, dX have input_size elements\n// - B has bias_size elements\n// - input_size % bias_size == 0\ntemplate \nvoid LaunchBiasGeluGradDxKernel(\n hipStream_t stream,\n int64_t input_size, int64_t bias_size,\n const T* dY, const T* X, const T* B, T* dX);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"orttraining/core/graph/horovod_adapters.h\"\n#include \"orttraining/core/graph/optimizer_config.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass HorovodAllReduce final : public CudaKernel {\n public:\n HorovodAllReduce(const OpKernelInfo& info) : CudaKernel(info) {\n unique_name = \"AllReduceNode_\" + info.node().Name();\n int64_t reduce_op;\n // bugbug\n int64_t adasum_type = training::AdasumReductionType::None;\n info.GetAttrOrDefault(\"reduce_op\", &reduce_op, static_cast(hvd::ReduceOp::SUM));\n info.GetAttrOrDefault(\"reduce_algo\", &adasum_type, static_cast(training::AdasumReductionType::None));\n reduce_op_ = GetReduceOp(reduce_op);\n adasum_type_ = static_cast(adasum_type);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n std::string unique_name;\n hvd::ReduceOp reduce_op_;\n training::AdasumReductionType adasum_type_;\n};\n\nclass HorovodBarrier final : public CudaKernel {\n public:\n HorovodBarrier(const OpKernelInfo& info) : CudaKernel(info) {\n // bugbug\n int64_t adasum_type = training::AdasumReductionType::None;\n info.GetAttrOrDefault(\"reduce_algo\", &adasum_type, static_cast(training::AdasumReductionType::None));\n adasum_type_ = static_cast(adasum_type);\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n training::AdasumReductionType adasum_type_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"orttraining/core/graph/horovod_adapters.h\"\n#include \"orttraining/core/graph/optimizer_config.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass HorovodAllReduce final : public RocmKernel {\n public:\n HorovodAllReduce(const OpKernelInfo& info) : RocmKernel(info) {\n unique_name = \"AllReduceNode_\" + info.node().Name();\n int64_t reduce_op;\n // bugbug\n int64_t adasum_type = training::AdasumReductionType::None;\n info.GetAttrOrDefault(\"reduce_op\", &reduce_op, static_cast(hvd::ReduceOp::SUM));\n info.GetAttrOrDefault(\"reduce_algo\", &adasum_type, static_cast(training::AdasumReductionType::None));\n reduce_op_ = GetReduceOp(reduce_op);\n adasum_type_ = static_cast(adasum_type);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n std::string unique_name;\n hvd::ReduceOp reduce_op_;\n training::AdasumReductionType adasum_type_;\n};\n\nclass HorovodBarrier final : public RocmKernel {\n public:\n HorovodBarrier(const OpKernelInfo& info) : RocmKernel(info) {\n // bugbug\n int64_t adasum_type = training::AdasumReductionType::None;\n info.GetAttrOrDefault(\"reduce_algo\", &adasum_type, static_cast(training::AdasumReductionType::None));\n adasum_type_ = static_cast(adasum_type);\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n training::AdasumReductionType adasum_type_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/math/clip.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nvoid ClipImpl(cudaStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/math/clip.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nvoid ClipImpl(hipStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#pragma once\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \n__device__ __forceinline__ void SetBitmask(const CUDA_LONG id, const CUDA_LONG mask_element_count, const fast_divmod fdm_bits_per_element, BitmaskElementType thread_bitmask, BitmaskElementType* mask_data) {\n int bitmask_idx, bitmask_shift;\n fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);\n BitmaskElementType bitmask = (thread_bitmask << bitmask_shift);\n#if defined(USE_CUDA) && __CUDA_ARCH__ >= 800\n \n BitmaskElementType thread_mask = __match_any_sync(0xFFFFFFFF, bitmask_idx);\n \n \n bitmask = __reduce_or_sync(thread_mask, bitmask);\n#else\n#pragma unroll\n for (int stride = kNumBitsPerBitmaskElement / (NumUnroll * 2); stride > 0; stride /= 2) {\n bitmask |= WARP_SHFL_DOWN(bitmask, stride);\n }\n#endif\n \n if (bitmask_shift == 0 && bitmask_idx < mask_element_count) {\n mask_data[bitmask_idx] = bitmask;\n }\n}\ntemplate \n__device__ __forceinline__ void GetMasks(CUDA_LONG id, const fast_divmod fdm_bits_per_element, const BitmaskElementType* mask_data, bool* mask_result) {\n int bitmask_idx, bitmask_shift;\n fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);\n BitmaskElementType shifted_mask = mask_data[bitmask_idx] >> bitmask_shift;\n#pragma unroll\n for (int i = 0; i < NumUnroll; i++) {\n mask_result[i] = (shifted_mask & (1 << i)) != 0;\n }\n}\n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \n__device__ __forceinline__ void SetBitmask(const HIP_LONG id, const HIP_LONG mask_element_count, const fast_divmod fdm_bits_per_element, BitmaskElementType thread_bitmask, BitmaskElementType* mask_data) {\n int bitmask_idx, bitmask_shift;\n fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);\n BitmaskElementType bitmask = (thread_bitmask << bitmask_shift);\n#if defined(USE_ROCM) && __CUDA_ARCH__ >= 800\n \n BitmaskElementType thread_mask = __match_any_sync(0xFFFFFFFF, bitmask_idx);\n \n \n bitmask = __reduce_or_sync(thread_mask, bitmask);\n#else\n#pragma unroll\n for (int stride = kNumBitsPerBitmaskElement / (NumUnroll * 2); stride > 0; stride /= 2) {\n bitmask |= WARP_SHFL_DOWN(bitmask, stride);\n }\n#endif\n \n if (bitmask_shift == 0 && bitmask_idx < mask_element_count) {\n mask_data[bitmask_idx] = bitmask;\n }\n}\ntemplate \n__device__ __forceinline__ void GetMasks(HIP_LONG id, const fast_divmod fdm_bits_per_element, const BitmaskElementType* mask_data, bool* mask_result) {\n int bitmask_idx, bitmask_shift;\n fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);\n BitmaskElementType shifted_mask = mask_data[bitmask_idx] >> bitmask_shift;\n#pragma unroll\n for (int i = 0; i < NumUnroll; i++) {\n mask_result[i] = (shifted_mask & (1 << i)) != 0;\n }\n}\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"orttraining/core/framework/distributed_run_context.h\"\n\n#if defined(ORT_USE_NCCL)\n#include \n#endif\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#if defined(ORT_USE_NCCL)\n#define NCCL_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(NCCL_CALL(expr))\n#endif\nclass NcclContext final {\n public:\n NcclContext();\n ~NcclContext();\n\n ncclComm_t Comm(training::WorkerGroupType group_type);\n\n int Rank(training::WorkerGroupType group_type) const {\n return training::DistributedRunContext::RankInGroup(group_type);\n }\n\n int Size(training::WorkerGroupType group_type) const {\n return training::DistributedRunContext::GroupSize(group_type);\n }\n\n private:\n ncclComm_t global_group_comm_;\n ncclComm_t data_group_comm_;\n ncclComm_t node_local_comm_;\n ncclComm_t cross_node_comm_;\n ncclComm_t horizontal_group_comm_;\n};\n\n// -----------------------------------------------------------------------\n// Base class for NCCL kernels\n// -----------------------------------------------------------------------\nclass NcclKernel : public CudaKernel {\n public:\n explicit NcclKernel(const OpKernelInfo& info);\n\n protected:\n NcclContext* nccl_ = nullptr;\n training::WorkerGroupType group_type_;\n};\n\nncclDataType_t GetNcclDataType(onnxruntime::MLDataType type);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"orttraining/core/framework/distributed_run_context.h\"\n\n#if defined(ORT_USE_NCCL)\n#include \n#endif\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#if defined(ORT_USE_NCCL)\n#define NCCL_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(NCCL_CALL(expr))\n#endif\nclass NcclContext final {\n public:\n NcclContext();\n ~NcclContext();\n\n ncclComm_t Comm(training::WorkerGroupType group_type);\n\n int Rank(training::WorkerGroupType group_type) const {\n return training::DistributedRunContext::RankInGroup(group_type);\n }\n\n int Size(training::WorkerGroupType group_type) const {\n return training::DistributedRunContext::GroupSize(group_type);\n }\n\n private:\n ncclComm_t global_group_comm_;\n ncclComm_t data_group_comm_;\n ncclComm_t node_local_comm_;\n ncclComm_t cross_node_comm_;\n ncclComm_t horizontal_group_comm_;\n};\n\n// -----------------------------------------------------------------------\n// Base class for NCCL kernels\n// -----------------------------------------------------------------------\nclass NcclKernel : public RocmKernel {\n public:\n explicit NcclKernel(const OpKernelInfo& info);\n\n protected:\n NcclContext* nccl_ = nullptr;\n training::WorkerGroupType group_type_;\n};\n\nncclDataType_t GetNcclDataType(onnxruntime::MLDataType type);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"nccl_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass NcclAllReduce final : public NcclKernel {\n public:\n explicit NcclAllReduce(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass NcclAllGather final : public NcclKernel {\n public:\n explicit NcclAllGather(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass NcclReduceScatter final : public NcclKernel {\n public:\n explicit NcclReduceScatter(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"nccl_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass NcclAllReduce final : public NcclKernel {\n public:\n explicit NcclAllReduce(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass NcclAllGather final : public NcclKernel {\n public:\n explicit NcclAllGather(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nclass NcclReduceScatter final : public NcclKernel {\n public:\n explicit NcclReduceScatter(const OpKernelInfo& info);\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#if defined(ORT_USE_NCCL) || defined(USE_MPI)\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Recv final : public CudaKernel {\n public:\n Recv(const OpKernelInfo& info) : CudaKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"tag\", &tag_).IsOK());\n ORT_ENFORCE(info.GetAttrs(\"element_types\", element_types_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n void ReceiveData(\n const int num_tensors,\n std::vector received_tensors,\n const int src,\n const size_t aggregated_aligned_tensor_bytes,\n OpKernelContext* context,\n IAllocatorUniquePtr& buffer) const;\n int64_t tag_;\n std::vector element_types_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n#endif\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#if defined(ORT_USE_NCCL) || defined(USE_MPI)\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Recv final : public RocmKernel {\n public:\n Recv(const OpKernelInfo& info) : RocmKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"tag\", &tag_).IsOK());\n ORT_ENFORCE(info.GetAttrs(\"element_types\", element_types_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n void ReceiveData(\n const int num_tensors,\n std::vector received_tensors,\n const int src,\n const size_t aggregated_aligned_tensor_bytes,\n OpKernelContext* context,\n IAllocatorUniquePtr& buffer) const;\n int64_t tag_;\n std::vector element_types_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n\n#endif\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#if defined(ORT_USE_NCCL) || defined(USE_MPI)\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Send final : public CudaKernel {\n public:\n Send(const OpKernelInfo& info) : CudaKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"tag\", &tag_).IsOK());\n ORT_ENFORCE(info.GetAttrs(\"element_types\", element_types_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n void SendData(\n OpKernelContext* ctx,\n const int dst,\n const int num_tensors,\n size_t aggregated_aligned_tensor_bytes,\n std::vector tensor_offsets_in_bytes,\n std::vector tensor_sizes_in_bytes) const;\n\n int64_t tag_;\n std::vector element_types_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n#endif\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#if defined(ORT_USE_NCCL) || defined(USE_MPI)\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Send final : public RocmKernel {\n public:\n Send(const OpKernelInfo& info) : RocmKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"tag\", &tag_).IsOK());\n ORT_ENFORCE(info.GetAttrs(\"element_types\", element_types_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n void SendData(\n OpKernelContext* ctx,\n const int dst,\n const int num_tensors,\n size_t aggregated_aligned_tensor_bytes,\n std::vector tensor_offsets_in_bytes,\n std::vector tensor_sizes_in_bytes) const;\n\n int64_t tag_;\n std::vector element_types_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n\n#endif\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/training_ops/cpu/controlflow/group.h\"\n#include \"core/providers/cuda/cuda_fwd.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n Group,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .TypeConstraint(\"B\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes()),\n onnxruntime::contrib::Group);\n\nONNX_OPERATOR_KERNEL_EX(\n PassThrough,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .VariadicAlias(0, 0), // outputs and inputs are mapped one to one\n onnxruntime::contrib::PassThrough);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/training_ops/cpu/controlflow/group.h\"\n#include \"core/providers/rocm/rocm_fwd.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n Group,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .TypeConstraint(\"B\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes()),\n onnxruntime::contrib::Group);\n\nONNX_OPERATOR_KERNEL_EX(\n PassThrough,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorTypes())\n .VariadicAlias(0, 0), // outputs and inputs are mapped one to one\n onnxruntime::contrib::PassThrough);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/controlflow/record.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n// Include event mechanism shared by CPU and GPU implementations.\n#include \"orttraining/training_ops/cpu/controlflow/event_pool.h\"\n#include \"orttraining/training_ops/cpu/controlflow/record.h\"\n#include \"core/providers/cuda/nvtx_profile.h\"\n#include \"core/providers/cuda/nvtx_profile_context.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n RecordEvent,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0) /* Keep EventIdentifier in CPU */\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1\n RecordEvent);\n\nStatus RecordEvent::ComputeInternal(OpKernelContext* ctx) const {\n#ifdef ENABLE_NVTX_PROFILE\n const Tensor* event_id_tensor = ctx->Input(0);\n const int64_t event_id = *(event_id_tensor->template Data());\n\n auto& profile_context = profile::Context::GetInstance();\n const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());\n profile::NvtxRangeCreator range(\n \"Batch-\" + tag + \" Record-\" + std::to_string(event_id), profile::Color::Magenta);\n range.Begin();\n#endif\n\n // Reuse CPU helper to record event because event tensor is a CPU tensor.\n onnxruntime::contrib::record_event_in_tensor(*ctx->Input(0));\n ORT_ENFORCE(ctx->GetComputeStream());\n for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {\n // This iteration copies (i-1)-th input to i-th output.\n const Tensor* X = ctx->Input(i_out + 1);\n const TensorShape& data_shape = X->Shape();\n Tensor* Y = ctx->Output(i_out, data_shape);\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));\n }\n\n#ifdef ENABLE_NVTX_PROFILE\n range.End();\n#endif\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/controlflow/record.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n// Include event mechanism shared by CPU and GPU implementations.\n#include \"orttraining/training_ops/cpu/controlflow/event_pool.h\"\n#include \"orttraining/training_ops/cpu/controlflow/record.h\"\n#include \"core/providers/rocm/nvtx_profile.h\"\n#include \"core/providers/rocm/nvtx_profile_context.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n RecordEvent,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0) /* Keep EventIdentifier in CPU */\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1\n RecordEvent);\n\nStatus RecordEvent::ComputeInternal(OpKernelContext* ctx) const {\n#ifdef ENABLE_NVTX_PROFILE\n const Tensor* event_id_tensor = ctx->Input(0);\n const int64_t event_id = *(event_id_tensor->template Data());\n\n auto& profile_context = profile::Context::GetInstance();\n const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());\n profile::NvtxRangeCreator range(\n \"Batch-\" + tag + \" Record-\" + std::to_string(event_id), profile::Color::Magenta);\n range.Begin();\n#endif\n\n // Reuse CPU helper to record event because event tensor is a CPU tensor.\n onnxruntime::contrib::record_event_in_tensor(*ctx->Input(0));\n ORT_ENFORCE(ctx->GetComputeStream());\n for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {\n // This iteration copies (i-1)-th input to i-th output.\n const Tensor* X = ctx->Input(i_out + 1);\n const TensorShape& data_shape = X->Shape();\n Tensor* Y = ctx->Output(i_out, data_shape);\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));\n }\n\n#ifdef ENABLE_NVTX_PROFILE\n range.End();\n#endif\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/cudnn_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass RecordEvent final : public CudaKernel {\n public:\n RecordEvent(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/miopen_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass RecordEvent final : public RocmKernel {\n public:\n RecordEvent(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/controlflow/wait.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n// Include event mechanism shared by CPU and GPU implementations.\n#include \"orttraining/training_ops/cpu/controlflow/event_pool.h\"\n#include \"orttraining/training_ops/cpu/controlflow/wait.h\"\n#include \"core/providers/cuda/nvtx_profile.h\"\n#include \"core/providers/cuda/nvtx_profile_context.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n WaitEvent,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0) /* CPU variable */\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1\n WaitEvent);\n\nStatus WaitEvent::ComputeInternal(OpKernelContext* ctx) const {\n#ifdef ENABLE_NVTX_PROFILE\n const Tensor* event_id_tensor = ctx->Input(0);\n const int64_t event_id = *(event_id_tensor->template Data());\n\n auto& profile_context = profile::Context::GetInstance();\n const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());\n profile::NvtxRangeCreator range(\n \"Batch-\" + tag + \" Wait-\" + std::to_string(event_id), profile::Color::Blue);\n range.Begin();\n#endif\n\n // Reuse CPU helper to wait event because event tensor is a CPU tensor.\n onnxruntime::contrib::wait_event_in_tensor(*ctx->Input(0));\n ORT_ENFORCE(ctx->GetComputeStream());\n for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {\n // This iteration copies (i-1)-th input to i-th output.\n const Tensor* X = ctx->Input(i_out + 1);\n const TensorShape& data_shape = X->Shape();\n Tensor* Y = ctx->Output(i_out, data_shape);\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));\n }\n\n#ifdef ENABLE_NVTX_PROFILE\n range.End();\n#endif\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/controlflow/wait.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n// Include event mechanism shared by CPU and GPU implementations.\n#include \"orttraining/training_ops/cpu/controlflow/event_pool.h\"\n#include \"orttraining/training_ops/cpu/controlflow/wait.h\"\n#include \"core/providers/rocm/nvtx_profile.h\"\n#include \"core/providers/rocm/nvtx_profile_context.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n WaitEvent,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0) /* CPU variable */\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1\n WaitEvent);\n\nStatus WaitEvent::ComputeInternal(OpKernelContext* ctx) const {\n#ifdef ENABLE_NVTX_PROFILE\n const Tensor* event_id_tensor = ctx->Input(0);\n const int64_t event_id = *(event_id_tensor->template Data());\n\n auto& profile_context = profile::Context::GetInstance();\n const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());\n profile::NvtxRangeCreator range(\n \"Batch-\" + tag + \" Wait-\" + std::to_string(event_id), profile::Color::Blue);\n range.Begin();\n#endif\n\n // Reuse CPU helper to wait event because event tensor is a CPU tensor.\n onnxruntime::contrib::wait_event_in_tensor(*ctx->Input(0));\n ORT_ENFORCE(ctx->GetComputeStream());\n for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {\n // This iteration copies (i-1)-th input to i-th output.\n const Tensor* X = ctx->Input(i_out + 1);\n const TensorShape& data_shape = X->Shape();\n Tensor* Y = ctx->Output(i_out, data_shape);\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));\n }\n\n#ifdef ENABLE_NVTX_PROFILE\n range.End();\n#endif\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/cudnn_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass WaitEvent final : public CudaKernel {\n public:\n WaitEvent(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/miopen_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass WaitEvent final : public RocmKernel {\n public:\n WaitEvent(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/training_ops/cpu/controlflow/yield.h\"\n#include \"core/providers/cuda/cuda_fwd.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n YieldOp,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .ExternalOutputs(),\n onnxruntime::contrib::YieldOp);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/training_ops/cpu/controlflow/yield.h\"\n#include \"core/providers/rocm/rocm_fwd.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n YieldOp,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .ExternalOutputs(),\n onnxruntime::contrib::YieldOp);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass CumSum final : public CudaKernel {\n public:\n explicit CumSum(const OpKernelInfo& info) : CudaKernel(info) {\n // Process exclusive attribute\n int64_t exclusive = 0;\n auto status = info.GetAttr(\"exclusive\", &exclusive);\n if (status.IsOK()) {\n if (exclusive == 1 || exclusive == 0) {\n exclusive_ = (exclusive == 1);\n } else {\n ORT_ENFORCE(\"attribute exclusive can only be 0 or 1\");\n }\n }\n\n // Process reverse attribute\n int64_t reverse = 0;\n status = info.GetAttr(\"reverse\", &reverse);\n if (status.IsOK()) {\n if (reverse == 1 || reverse == 0) {\n reverse_ = (reverse == 1);\n } else {\n ORT_ENFORCE(\"attribute reverse can only be 0 or 1\");\n }\n }\n }\n\n ~CumSum() = default;\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n bool exclusive_ = false;\n bool reverse_ = false;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass CumSum final : public RocmKernel {\n public:\n explicit CumSum(const OpKernelInfo& info) : RocmKernel(info) {\n // Process exclusive attribute\n int64_t exclusive = 0;\n auto status = info.GetAttr(\"exclusive\", &exclusive);\n if (status.IsOK()) {\n if (exclusive == 1 || exclusive == 0) {\n exclusive_ = (exclusive == 1);\n } else {\n ORT_ENFORCE(\"attribute exclusive can only be 0 or 1\");\n }\n }\n\n // Process reverse attribute\n int64_t reverse = 0;\n status = info.GetAttr(\"reverse\", &reverse);\n if (status.IsOK()) {\n if (reverse == 1 || reverse == 0) {\n reverse_ = (reverse == 1);\n } else {\n ORT_ENFORCE(\"attribute reverse can only be 0 or 1\");\n }\n }\n }\n\n ~CumSum() = default;\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n bool exclusive_ = false;\n bool reverse_ = false;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass GistBinarizeEncoderOp final : public CudaKernel {\n public:\n GistBinarizeEncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistBinarizeDecoderOp final : public CudaKernel {\n public:\n GistBinarizeDecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack1EncoderOp final : public CudaKernel {\n public:\n static constexpr int GIST_PACK1_FACTOR = 8;\n GistPack1EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack1DecoderOp final : public CudaKernel {\n public:\n static constexpr int GIST_PACK1_FACTOR = 8;\n GistPack1DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack8EncoderOp final : public CudaKernel {\n public:\n GistPack8EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack8DecoderOp final : public CudaKernel {\n public:\n GistPack8DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack16EncoderOp final : public CudaKernel {\n public:\n GistPack16EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack16DecoderOp final : public CudaKernel {\n public:\n GistPack16DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPackMsfp15EncoderOp final : public CudaKernel {\n public:\n GistPackMsfp15EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPackMsfp15DecoderOp final : public CudaKernel {\n public:\n GistPackMsfp15DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass GistBinarizeEncoderOp final : public RocmKernel {\n public:\n GistBinarizeEncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistBinarizeDecoderOp final : public RocmKernel {\n public:\n GistBinarizeDecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack1EncoderOp final : public RocmKernel {\n public:\n static constexpr int GIST_PACK1_FACTOR = 8;\n GistPack1EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack1DecoderOp final : public RocmKernel {\n public:\n static constexpr int GIST_PACK1_FACTOR = 8;\n GistPack1DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack8EncoderOp final : public RocmKernel {\n public:\n GistPack8EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack8DecoderOp final : public RocmKernel {\n public:\n GistPack8DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack16EncoderOp final : public RocmKernel {\n public:\n GistPack16EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPack16DecoderOp final : public RocmKernel {\n public:\n GistPack16DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPackMsfp15EncoderOp final : public RocmKernel {\n public:\n GistPackMsfp15EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass GistPackMsfp15DecoderOp final : public RocmKernel {\n public:\n GistPackMsfp15DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \n#include \n#include \n\nnamespace onnxruntime {\nnamespace cuda {\nstatic constexpr int GIST_PACK1_FACTOR = 8;\ntemplate \nvoid GistBinarizeEncoderImpl(\n cudaStream_t stream,\n const T* input_data,\n bool* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistBinarizeDecoderImpl(\n cudaStream_t stream,\n const bool* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack1EncoderImpl(\n cudaStream_t stream,\n const T* input_data,\n uint8_t* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack1DecoderImpl(\n cudaStream_t stream,\n const uint8_t* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack8EncoderImpl(\n cudaStream_t stream,\n const T* input_data,\n uint8_t* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack8DecoderImpl(\n cudaStream_t stream,\n const uint8_t* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack16EncoderImpl(\n cudaStream_t stream,\n const T* input_data,\n half* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack16DecoderImpl(\n cudaStream_t stream,\n const half* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPackMsfp15EncoderImpl(\n cudaStream_t stream,\n const T* input_data,\n uint8_t* output_data,\n const size_t pre_axis_size,\n const size_t axis_size,\n const size_t tile_size);\n\ntemplate \nvoid GistPackMsfp15DecoderImpl(\n cudaStream_t stream,\n const uint8_t* input_data,\n T* output_data,\n const size_t pre_axis_size,\n const size_t axis_size,\n const size_t tile_size);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \n#include \n#include \n\nnamespace onnxruntime {\nnamespace rocm {\nstatic constexpr int GIST_PACK1_FACTOR = 8;\ntemplate \nvoid GistBinarizeEncoderImpl(\n hipStream_t stream,\n const T* input_data,\n bool* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistBinarizeDecoderImpl(\n hipStream_t stream,\n const bool* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack1EncoderImpl(\n hipStream_t stream,\n const T* input_data,\n uint8_t* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack1DecoderImpl(\n hipStream_t stream,\n const uint8_t* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack8EncoderImpl(\n hipStream_t stream,\n const T* input_data,\n uint8_t* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack8DecoderImpl(\n hipStream_t stream,\n const uint8_t* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack16EncoderImpl(\n hipStream_t stream,\n const T* input_data,\n half* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPack16DecoderImpl(\n hipStream_t stream,\n const half* input_data,\n T* output_data,\n const size_t nums_of_elements);\n\ntemplate \nvoid GistPackMsfp15EncoderImpl(\n hipStream_t stream,\n const T* input_data,\n uint8_t* output_data,\n const size_t pre_axis_size,\n const size_t axis_size,\n const size_t tile_size);\n\ntemplate \nvoid GistPackMsfp15DecoderImpl(\n hipStream_t stream,\n const uint8_t* input_data,\n T* output_data,\n const size_t pre_axis_size,\n const size_t axis_size,\n const size_t tile_size);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"orttraining/training_ops/cpu/loss/reduction_type.h\"\n#include \"core/providers/cuda/reduction/reduction_ops.h\"\n#include \"orttraining/training_ops/cuda/loss/softmaxcrossentropy_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid SoftmaxCrossEntropyLossImpl(\n cudaStream_t stream,\n const T* log_prob,\n const TLabel* label,\n const T* weight,\n const TAcc* normalize_factor,\n size_t count,\n size_t label_depth,\n int64_t ignore_index,\n T* output_data);\n\ntemplate \nvoid SoftmaxCrossEntropyLossGradImpl(\n cudaStream_t stream,\n const T* dY,\n const T* log_prob,\n const TLabel* label,\n const T* weight,\n const TAcc* normalize_factor,\n const TOut* bias_data,\n size_t count,\n size_t label_depth,\n bool reduction_none,\n TOut* output_data);\n\ntemplate \nvoid ComputeSoftmaxCrossEntropyWeightsImpl(\n cudaStream_t stream,\n const TLabel* label,\n const T* weight,\n size_t count,\n size_t label_depth,\n int64_t ignore_index,\n TOut* weight_data_nd);\n\ntemplate \nclass SoftmaxCrossEntropyLoss final : public LossBase {\n public:\n SoftmaxCrossEntropyLoss(const OpKernelInfo& info) : LossBase(info) {\n int64_t default_ignore_index = -1;\n info.GetAttrOrDefault(\"ignore_index\", &ignore_index_, default_ignore_index);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t ignore_index_;\n};\n\ntemplate \nclass SoftmaxCrossEntropyLossGrad final : public LossBase {\n public:\n SoftmaxCrossEntropyLossGrad(const OpKernelInfo& info) : LossBase(info) {\n int64_t default_ignore_index = -1;\n info.GetAttrOrDefault(\"ignore_index\", &ignore_index_, default_ignore_index);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t ignore_index_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"orttraining/training_ops/cpu/loss/reduction_type.h\"\n#include \"core/providers/rocm/reduction/reduction_ops.h\"\n#include \"orttraining/training_ops/rocm/loss/softmaxcrossentropy_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid SoftmaxCrossEntropyLossImpl(\n hipStream_t stream,\n const T* log_prob,\n const TLabel* label,\n const T* weight,\n const TAcc* normalize_factor,\n size_t count,\n size_t label_depth,\n int64_t ignore_index,\n T* output_data);\n\ntemplate \nvoid SoftmaxCrossEntropyLossGradImpl(\n hipStream_t stream,\n const T* dY,\n const T* log_prob,\n const TLabel* label,\n const T* weight,\n const TAcc* normalize_factor,\n const TOut* bias_data,\n size_t count,\n size_t label_depth,\n bool reduction_none,\n TOut* output_data);\n\ntemplate \nvoid ComputeSoftmaxCrossEntropyWeightsImpl(\n hipStream_t stream,\n const TLabel* label,\n const T* weight,\n size_t count,\n size_t label_depth,\n int64_t ignore_index,\n TOut* weight_data_nd);\n\ntemplate \nclass SoftmaxCrossEntropyLoss final : public LossBase {\n public:\n SoftmaxCrossEntropyLoss(const OpKernelInfo& info) : LossBase(info) {\n int64_t default_ignore_index = -1;\n info.GetAttrOrDefault(\"ignore_index\", &ignore_index_, default_ignore_index);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t ignore_index_;\n};\n\ntemplate \nclass SoftmaxCrossEntropyLossGrad final : public LossBase {\n public:\n SoftmaxCrossEntropyLossGrad(const OpKernelInfo& info) : LossBase(info) {\n int64_t default_ignore_index = -1;\n info.GetAttrOrDefault(\"ignore_index\", &ignore_index_, default_ignore_index);\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t ignore_index_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#pragma once\n#include \"orttraining/training_ops/cpu/loss/reduction_type.h\"\n#include \"core/providers/cuda/reduction/reduction_ops.h\"\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nvoid SoftMaxCrossEntropyImpl(\n cudaStream_t stream, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);\ntemplate \nvoid SoftMaxCrossEntropyGradImpl(\n cudaStream_t stream, const T* dY, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);\ntemplate \nvoid SparseSoftmaxCrossEntropyImpl(\n cudaStream_t stream, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);\ntemplate \nvoid SparseSoftmaxCrossEntropyGradImpl(\n cudaStream_t stream, const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);\nclass LossBase : public ReduceKernel {\n public:\n explicit LossBase(const OpKernelInfo& info)\n : ReduceKernel(info, int64_t(0)) {\n std::string reduction;\n ORT_ENFORCE(info.GetAttr(\"reduction\", &reduction).IsOK());\n reduction_ = StringToReductionType(reduction);\n }\n protected:\n ReductionType reduction_;\n};\ntemplate \nclass SoftmaxCrossEntropy final : public LossBase {\n public:\n SoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass SoftmaxCrossEntropyGrad final : public 
LossBase {\n public:\n SoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass SparseSoftmaxCrossEntropy final : public LossBase {\n public:\n SparseSoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass SparseSoftmaxCrossEntropyGrad final : public LossBase {\n public:\n SparseSoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \"orttraining/training_ops/cpu/loss/reduction_type.h\"\n#include \"core/providers/rocm/reduction/reduction_ops.h\"\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nvoid SoftMaxCrossEntropyImpl(\n hipStream_t stream, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);\ntemplate \nvoid SoftMaxCrossEntropyGradImpl(\n hipStream_t stream, const T* dY, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);\ntemplate \nvoid SparseSoftmaxCrossEntropyImpl(\n hipStream_t stream, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);\ntemplate \nvoid SparseSoftmaxCrossEntropyGradImpl(\n hipStream_t stream, const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);\nclass LossBase : public ReduceKernel {\n public:\n explicit LossBase(const OpKernelInfo& info)\n : ReduceKernel(info, int64_t(0)) {\n std::string reduction;\n ORT_ENFORCE(info.GetAttr(\"reduction\", &reduction).IsOK());\n reduction_ = StringToReductionType(reduction);\n }\n protected:\n ReductionType reduction_;\n};\ntemplate \nclass SoftmaxCrossEntropy final : public LossBase {\n public:\n SoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass SoftmaxCrossEntropyGrad final : public LossBase {\n public:\n SoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass SparseSoftmaxCrossEntropy final : public LossBase {\n public:\n SparseSoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass SparseSoftmaxCrossEntropyGrad final : public LossBase {\n public:\n SparseSoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {\n \n ORT_ENFORCE(reduction_ != ReductionType::NONE, \"Loss with reduction 'none' is not implemented.\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n} \n} 
###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass BiasSoftmaxDropout final : public onnxruntime::cuda::CudaKernel {\n public:\n BiasSoftmaxDropout(const OpKernelInfo& info) : CudaKernel{info} {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(1));\n int64_t is_inner_broadcast_value;\n ORT_ENFORCE(info.GetAttr(\"is_inner_broadcast\", &is_inner_broadcast_value).IsOK());\n is_inner_broadcast_ = is_inner_broadcast_value != 0;\n int64_t seed = 0;\n if (info.GetAttr(\"seed\", &seed).IsOK()) {\n generator_ = std::make_unique(static_cast(seed));\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n // For Softmax.\n int64_t axis_;\n bool is_inner_broadcast_;\n\n // For Dropout.\n mutable std::unique_ptr generator_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass BiasSoftmaxDropout final : public onnxruntime::rocm::RocmKernel {\n public:\n BiasSoftmaxDropout(const OpKernelInfo& info) : RocmKernel{info} {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(1));\n int64_t is_inner_broadcast_value;\n ORT_ENFORCE(info.GetAttr(\"is_inner_broadcast\", &is_inner_broadcast_value).IsOK());\n is_inner_broadcast_ = is_inner_broadcast_value != 0;\n int64_t seed = 0;\n if (info.GetAttr(\"seed\", &seed).IsOK()) {\n generator_ = std::make_unique(static_cast(seed));\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n // For Softmax.\n int64_t axis_;\n bool is_inner_broadcast_;\n\n // For Dropout.\n mutable std::unique_ptr generator_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/random_generator.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus BiasSoftmaxDropoutImpl(cudaStream_t stream, const cudaDeviceProp& prop, cudnnHandle_t cudnn_handle,\n T* dropout_output_data, bool* mask_data, T* softmax_output_data, const T* input_data,\n const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,\n int bias_broadcast_size, const float ratio, PhiloxGenerator& generator);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/random_generator.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus BiasSoftmaxDropoutImpl(hipStream_t stream, const hipDeviceProp_t& prop, miopenHandle_t miopen_handle,\n T* dropout_output_data, bool* mask_data, T* softmax_output_data, const T* input_data,\n const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,\n int bias_broadcast_size, const float ratio, PhiloxGenerator& generator);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/reduction/reduction_ops.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nclass DivGrad : public ReduceKernel { // TODO: not to derive from ReduceKernel.\n // Use a cudnn reduce sum simple helper instead.\n public:\n DivGrad(const OpKernelInfo& info) : ReduceKernel(info, /*keep_dims_override*/ int64_t(0)) {}\n Status ComputeInternal(OpKernelContext*) const override;\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/reduction/reduction_ops.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nclass DivGrad : public ReduceKernel { // TODO: not to derive from ReduceKernel.\n // Use a miopen reduce sum simple helper instead.\n public:\n DivGrad(const OpKernelInfo& info) : ReduceKernel(info, /*keep_dims_override*/ int64_t(0)) {}\n Status ComputeInternal(OpKernelContext*) const override;\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nvoid ImplDivGradSimple(\n cudaStream_t stream,\n SimpleBroadcast simpleBroadcast,\n const T* a_data,\n const T* b_data,\n const T* dy_data,\n size_t count,\n T* da_output_data,\n T* db_output_data);\n\ntemplate \nvoid ImplDivGradRhsPerChannelBatch1(\n cudaStream_t stream,\n const T* a_data,\n const T* b_data,\n const T* dy_data,\n size_t count,\n const fast_divmod& fdm_H,\n T* da_output_data,\n T* db_output_data);\n\ntemplate \nvoid ImplDivGradRhsPerChannelBatchN(\n cudaStream_t stream,\n const T* a_data,\n const T* b_data,\n const T* dy_data,\n size_t count,\n const fast_divmod& fdm_H,\n const fast_divmod& fdm_C,\n T* da_output_data,\n T* db_output_data);\n\ntemplate \nvoid ImplDivGrad(\n cudaStream_t stream,\n int32_t output_rank,\n const TArray& a_padded_strides,\n const T* a_data,\n const TArray& b_padded_strides,\n const T* b_data,\n const T* dy_data,\n size_t count,\n const TArray& fdm_output_strides,\n T* da_output_data,\n T* db_output_data);\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nvoid ImplDivGradSimple(\n hipStream_t stream,\n SimpleBroadcast simpleBroadcast,\n const T* a_data,\n const T* b_data,\n const T* dy_data,\n size_t count,\n T* da_output_data,\n T* db_output_data);\n\ntemplate \nvoid ImplDivGradRhsPerChannelBatch1(\n hipStream_t stream,\n const T* a_data,\n const T* b_data,\n const T* dy_data,\n size_t count,\n const fast_divmod& fdm_H,\n T* da_output_data,\n T* db_output_data);\n\ntemplate \nvoid ImplDivGradRhsPerChannelBatchN(\n hipStream_t stream,\n const T* a_data,\n const T* b_data,\n const T* dy_data,\n size_t count,\n const fast_divmod& fdm_H,\n const fast_divmod& fdm_C,\n T* da_output_data,\n T* db_output_data);\n\ntemplate \nvoid ImplDivGrad(\n hipStream_t stream,\n int32_t output_rank,\n const TArray& a_padded_strides,\n const T* a_data,\n const TArray& b_padded_strides,\n const T* b_data,\n const T* dy_data,\n size_t count,\n const TArray& fdm_output_strides,\n T* da_output_data,\n T* db_output_data);\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/math/isfinite.h\"\n#include \"orttraining/training_ops/cuda/math/isfinite_impl.h\"\n\nusing namespace ONNX_NAMESPACE;\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace cuda {\n\n#define REGISTER_ISFINITE_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n IsFinite, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()), \\\n IsFiniteOp);\n\ntemplate \nStatus IsFiniteOp::ComputeInternal(OpKernelContext* context) const {\n typedef typename ToCudaType::MappedType CudaTSrc;\n const Tensor& input = *context->Input(0);\n Tensor& output = *context->Output(0, input.Shape());\n IsFinite(\n Stream(context),\n reinterpret_cast(input.Data()),\n output.MutableData(), input.Shape().Size());\n\n return Status::OK();\n}\n\nREGISTER_ISFINITE_KERNEL_TYPED(MLFloat16)\nREGISTER_ISFINITE_KERNEL_TYPED(float)\nREGISTER_ISFINITE_KERNEL_TYPED(double)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/math/isfinite.h\"\n#include \"orttraining/training_ops/rocm/math/isfinite_impl.h\"\n\nusing namespace ONNX_NAMESPACE;\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace rocm {\n\n#define REGISTER_ISFINITE_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n IsFinite, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()), \\\n IsFiniteOp);\n\ntemplate \nStatus IsFiniteOp::ComputeInternal(OpKernelContext* context) const {\n typedef typename ToHipType::MappedType HipTSrc;\n const Tensor& input = *context->Input(0);\n Tensor& output = *context->Output(0, input.Shape());\n IsFinite(\n Stream(context),\n reinterpret_cast(input.Data()),\n output.MutableData(), input.Shape().Size());\n\n return Status::OK();\n}\n\nREGISTER_ISFINITE_KERNEL_TYPED(MLFloat16)\nREGISTER_ISFINITE_KERNEL_TYPED(float)\nREGISTER_ISFINITE_KERNEL_TYPED(double)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass IsFiniteOp final : public CudaKernel {\n public:\n IsFiniteOp(const OpKernelInfo& info) : CudaKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass IsFiniteOp final : public RocmKernel {\n public:\n IsFiniteOp(const OpKernelInfo& info) : RocmKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid CumSumImpl(\n cudaStream_t stream,\n const T* input_data,\n const fast_divmod& input_dim_along_axis,\n const fast_divmod& input_stride_along_axis,\n T* output_data,\n int64_t output_size,\n bool exclusive,\n bool reverse);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid CumSumImpl(\n hipStream_t stream,\n const T* input_data,\n const fast_divmod& input_dim_along_axis,\n const fast_divmod& input_stride_along_axis,\n T* output_data,\n int64_t output_size,\n bool exclusive,\n bool reverse);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"isfinite_impl.h\"\n#include \n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"contrib_ops/cuda/math/isfinite.cuh\"\n\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _IsFinite(const TSrc* input, bool* output, CUDA_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n output[id] = IsFiniteScalar(input[id]);\n}\n\ntemplate \nvoid IsFinite(cudaStream_t stream, const TSrc* input, bool* output, size_t count) {\n int blocksPerGrid = (int)(ceil(static_cast(count) / GridDim::maxThreadsPerBlock));\n CUDA_LONG N = static_cast(count);\n _IsFinite<<>>(input, output, N);\n}\n\n#define SPECIALIZE_ISFINITE_IMPL(T) \\\n template void IsFinite(cudaStream_t stream, const T* input, bool* output, size_t count);\n\nSPECIALIZE_ISFINITE_IMPL(half)\nSPECIALIZE_ISFINITE_IMPL(float)\nSPECIALIZE_ISFINITE_IMPL(double)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"isfinite_impl.h\"\n#include \n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"contrib_ops/rocm/math/isfinite.cuh\"\n\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _IsFinite(const TSrc* input, bool* output, HIP_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n output[id] = IsFiniteScalar(input[id]);\n}\n\ntemplate \nvoid IsFinite(hipStream_t stream, const TSrc* input, bool* output, size_t count) {\n int blocksPerGrid = (int)(ceil(static_cast(count) / GridDim::maxThreadsPerBlock));\n HIP_LONG N = static_cast(count);\n _IsFinite<<>>(input, output, N);\n}\n\n#define SPECIALIZE_ISFINITE_IMPL(T) \\\n template void IsFinite(hipStream_t stream, const T* input, bool* output, size_t count);\n\nSPECIALIZE_ISFINITE_IMPL(half)\nSPECIALIZE_ISFINITE_IMPL(float)\nSPECIALIZE_ISFINITE_IMPL(double)\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid IsFinite(cudaStream_t stream, const TSrc* input, bool* output, size_t N);\n\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid IsFinite(hipStream_t stream, const TSrc* input, bool* output, size_t N);\n\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass MixedPrecisionScale final : public CudaKernel {\n public:\n MixedPrecisionScale(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ONNX_NAMESPACE::TensorProto_DataType to_;\n size_t bytes_per_output_elem_;\n bool fuse_outputs_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass MixedPrecisionScale final : public RocmKernel {\n public:\n MixedPrecisionScale(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ONNX_NAMESPACE::TensorProto_DataType to_;\n size_t bytes_per_output_elem_;\n bool fuse_outputs_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef _WIN32\n#pragma warning(disable : 4244)\n#endif\n\n#include \"mixed_precision_scale_impl.h\"\n#include \n#include \"core/providers/cuda/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _MixedPrecisionScale(\n const SrcT* input_data,\n const float* scale_data,\n DstT* output_data,\n CUDA_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n output_data[id] = static_cast(*scale_data * static_cast(input_data[id]));\n}\n\ntemplate \nvoid Impl_MixedPrecisionScale(\n cudaStream_t stream,\n const SrcT* input_data,\n const float* scale_data,\n DstT* output_data,\n size_t count){\n int blocksPerGrid = static_cast(CeilDiv(count, GridDim::maxThreadsPerBlock));\n CUDA_LONG N = static_cast(count);\n _MixedPrecisionScale<<>>(\n input_data,\n scale_data,\n output_data,\n N);\n}\n\n#define SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(SrcT, DstT) \\\ntemplate void Impl_MixedPrecisionScale( \\\n cudaStream_t stream, \\\n const SrcT* input_data, \\\n const float* scale_data, \\\n DstT* output_data, \\\n size_t count);\n\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, half)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, float)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, half)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, float)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, BFloat16)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, float)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, BFloat16)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, half)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, BFloat16)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef _WIN32\n#pragma warning(disable : 4244)\n#endif\n\n#include \"mixed_precision_scale_impl.h\"\n#include \n#include \"core/providers/rocm/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _MixedPrecisionScale(\n const SrcT* input_data,\n const float* scale_data,\n DstT* output_data,\n HIP_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n output_data[id] = static_cast(*scale_data * static_cast(input_data[id]));\n}\n\ntemplate \nvoid Impl_MixedPrecisionScale(\n hipStream_t stream,\n const SrcT* input_data,\n const float* scale_data,\n DstT* output_data,\n size_t count){\n int blocksPerGrid = static_cast(CeilDiv(count, GridDim::maxThreadsPerBlock));\n HIP_LONG N = static_cast(count);\n _MixedPrecisionScale<<>>(\n input_data,\n scale_data,\n output_data,\n N);\n}\n\n#define SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(SrcT, DstT) \\\ntemplate void Impl_MixedPrecisionScale( \\\n hipStream_t stream, \\\n const SrcT* input_data, \\\n const float* scale_data, \\\n DstT* output_data, \\\n size_t count);\n\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, half)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, float)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, half)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, float)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, BFloat16)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, float)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, BFloat16)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, half)\nSPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, BFloat16)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nvoid Impl_MixedPrecisionScale(\n cudaStream_t stream,\n const SrcT* input_data,\n const float* scale_data,\n DstT* output_data,\n size_t count);\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nvoid Impl_MixedPrecisionScale(\n hipStream_t stream,\n const SrcT* input_data,\n const float* scale_data,\n DstT* output_data,\n size_t count);\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"orttraining/training_ops/cuda/math/scale.h\"\n#include \"orttraining/training_ops/cuda/math/scale_impl.h\"\nusing namespace ONNX_NAMESPACE;\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace cuda {\n#define REGISTER_SCALE_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( Scale, kMSDomain, 1, T, kCudaExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) .TypeConstraint(\"ScaleT\", {DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}) .InputMemoryType(OrtMemTypeCPUInput, 1), Scale);\ntemplate \nstruct GetScaleValueImpl {\n void operator()(const Tensor* scale, float& scale_value) const {\n ORT_ENFORCE(scale->Shape().Size() == 1, \"Scale input should have a single value.\");\n scale_value = static_cast(*(scale->template Data()));\n ORT_ENFORCE(scale_value != 0.0f, \"Scale value must not be 0.\");\n }\n};\ntemplate \nScale::Scale(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t scale_down;\n info.GetAttrOrDefault(\"scale_down\", &scale_down, static_cast(0));\n scale_down_ = (scale_down != 0);\n}\ntemplate \nStatus Scale::ComputeInternal(OpKernelContext* context) const {\n typedef typename ToCudaType::MappedType CudaT;\n float scale_value;\n auto scale_tensor = context->Input(1);\n utils::MLTypeCallDispatcher t_disp(scale_tensor->GetElementType());\n t_disp.Invoke(scale_tensor, scale_value);\n if (scale_down_) {\n scale_value = 1.0f / scale_value;\n }\n auto lhs_tensor = context->Input(0);\n auto output_tensor = context->Output(0, lhs_tensor->Shape());\n Impl_Scale(\n Stream(context), reinterpret_cast(lhs_tensor->template Data()), scale_value, reinterpret_cast(output_tensor->template MutableData()), output_tensor->Shape().Size());\n return Status::OK();\n}\nREGISTER_SCALE_KERNEL_TYPED(MLFloat16)\nREGISTER_SCALE_KERNEL_TYPED(float)\nREGISTER_SCALE_KERNEL_TYPED(double)\ntemplate Status Scale::ComputeInternal(OpKernelContext* context) const;\ntemplate Status Scale::ComputeInternal(OpKernelContext* context) const;\ntemplate Status Scale::ComputeInternal(OpKernelContext* context) const;\n} \n} \n\n###", "hip": " \n\n#include \"orttraining/training_ops/rocm/math/scale.h\"\n#include \"orttraining/training_ops/rocm/math/scale_impl.h\"\nusing namespace ONNX_NAMESPACE;\nusing namespace onnxruntime::common;\nnamespace onnxruntime {\nnamespace rocm {\n#define REGISTER_SCALE_KERNEL_TYPED(T) ONNX_OPERATOR_TYPED_KERNEL_EX( Scale, kMSDomain, 1, T, kRocmExecutionProvider, (*KernelDefBuilder::Create()) .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()) .TypeConstraint(\"ScaleT\", {DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}) .InputMemoryType(OrtMemTypeCPUInput, 1), Scale);\ntemplate \nstruct GetScaleValueImpl {\n void operator()(const Tensor* scale, float& scale_value) const {\n ORT_ENFORCE(scale->Shape().Size() == 1, \"Scale input should have a single value.\");\n scale_value = static_cast(*(scale->template Data()));\n ORT_ENFORCE(scale_value != 0.0f, \"Scale value must not 
be 0.\");\n }\n};\ntemplate \nScale::Scale(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t scale_down;\n info.GetAttrOrDefault(\"scale_down\", &scale_down, static_cast(0));\n scale_down_ = (scale_down != 0);\n}\ntemplate \nStatus Scale::ComputeInternal(OpKernelContext* context) const {\n typedef typename ToHipType::MappedType HipT;\n float scale_value;\n auto scale_tensor = context->Input(1);\n utils::MLTypeCallDispatcher t_disp(scale_tensor->GetElementType());\n t_disp.Invoke(scale_tensor, scale_value);\n if (scale_down_) {\n scale_value = 1.0f / scale_value;\n }\n auto lhs_tensor = context->Input(0);\n auto output_tensor = context->Output(0, lhs_tensor->Shape());\n Impl_Scale(\n Stream(context), reinterpret_cast(lhs_tensor->template Data()), scale_value, reinterpret_cast(output_tensor->template MutableData()), output_tensor->Shape().Size());\n return Status::OK();\n}\nREGISTER_SCALE_KERNEL_TYPED(MLFloat16)\nREGISTER_SCALE_KERNEL_TYPED(float)\nREGISTER_SCALE_KERNEL_TYPED(double)\ntemplate Status Scale::ComputeInternal(OpKernelContext* context) const;\ntemplate Status Scale::ComputeInternal(OpKernelContext* context) const;\ntemplate Status Scale::ComputeInternal(OpKernelContext* context) const;\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Scale final : public CudaKernel {\n public:\n Scale(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool scale_down_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Scale final : public RocmKernel {\n public:\n Scale(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool scale_down_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/math/scale_impl.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _Scale(\n const T* input_data,\n const T scale_value,\n T* output_data,\n CUDA_LONG N) {\n CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;\n T input_value[NumElementsPerThread];\n CUDA_LONG id = start;\n#pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n input_value[i] = input_data[id];\n id += NumThreadsPerBlock;\n }\n }\n\n id = start;\n#pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n output_data[id] = input_value[i] * scale_value;\n id += NumThreadsPerBlock;\n }\n }\n}\n\ntemplate \nvoid Impl_Scale(\n cudaStream_t stream,\n const T* input_data,\n const float scale_value,\n T* output_data,\n size_t count) {\n int blocksPerGrid = static_cast(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));\n CUDA_LONG N = static_cast(count);\n _Scale<<>>(\n input_data,\n static_cast(scale_value),\n output_data,\n N);\n}\n\n#define SPECIALIZE_SCALE_IMPL(T) \\\ntemplate void Impl_Scale( \\\n cudaStream_t stream, \\\n const T* input_data, \\\n const float scale_value, \\\n T* output_data, \\\n size_t count);\n\nSPECIALIZE_SCALE_IMPL(half)\nSPECIALIZE_SCALE_IMPL(float)\nSPECIALIZE_SCALE_IMPL(double)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/math/scale_impl.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _Scale(\n const T* input_data,\n const T scale_value,\n T* output_data,\n HIP_LONG N) {\n HIP_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;\n T input_value[NumElementsPerThread];\n HIP_LONG id = start;\n#pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n input_value[i] = input_data[id];\n id += NumThreadsPerBlock;\n }\n }\n\n id = start;\n#pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n output_data[id] = input_value[i] * scale_value;\n id += NumThreadsPerBlock;\n }\n }\n}\n\ntemplate \nvoid Impl_Scale(\n hipStream_t stream,\n const T* input_data,\n const float scale_value,\n T* output_data,\n size_t count) {\n int blocksPerGrid = static_cast(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));\n HIP_LONG N = static_cast(count);\n _Scale<<>>(\n input_data,\n static_cast(scale_value),\n output_data,\n N);\n}\n\n#define SPECIALIZE_SCALE_IMPL(T) \\\ntemplate void Impl_Scale( \\\n hipStream_t stream, \\\n const T* input_data, \\\n const float scale_value, \\\n T* output_data, \\\n size_t count);\n\nSPECIALIZE_SCALE_IMPL(half)\nSPECIALIZE_SCALE_IMPL(float)\nSPECIALIZE_SCALE_IMPL(double)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid Impl_Scale(\n cudaStream_t stream,\n const T* input_data,\n const float scale_value,\n T* output_data,\n size_t count);\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid Impl_Scale(\n hipStream_t stream,\n const T* input_data,\n const float scale_value,\n T* output_data,\n size_t count);\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SoftmaxDropoutGrad final : public CudaKernel {\n public:\n SoftmaxDropoutGrad(const OpKernelInfo& info) : CudaKernel(info) {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(1));\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SoftmaxDropoutGrad final : public RocmKernel {\n public:\n SoftmaxDropoutGrad(const OpKernelInfo& info) : RocmKernel(info) {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(1));\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nclass Gemm final : public CudaKernel {\n using Base = CudaKernel;\n\n public:\n Gemm(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t temp;\n ORT_ENFORCE(info.GetAttr(\"transA\", &temp).IsOK());\n trans_A_ = (temp != 0);\n\n ORT_ENFORCE(info.GetAttr(\"transB\", &temp).IsOK());\n trans_B_ = (temp != 0);\n\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool trans_A_;\n bool trans_B_;\n float alpha_;\n float beta_;\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nclass Gemm final : public RocmKernel {\n using Base = RocmKernel;\n\n public:\n Gemm(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t temp;\n ORT_ENFORCE(info.GetAttr(\"transA\", &temp).IsOK());\n trans_A_ = (temp != 0);\n\n ORT_ENFORCE(info.GetAttr(\"transB\", &temp).IsOK());\n trans_B_ = (temp != 0);\n\n ORT_ENFORCE(info.GetAttr(\"alpha\", &alpha_).IsOK());\n ORT_ENFORCE(info.GetAttr(\"beta\", &beta_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool trans_A_;\n bool trans_B_;\n float alpha_;\n float beta_;\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus SoftmaxDropoutGradImpl(cudaStream_t stream, cudnnHandle_t cudnn_handle, T* input_grad_data,\n const T* output_grad_data, const bool* mask_data, const T* softmax_output_data,\n int element_count, int batch_count, const float ratio);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus SoftmaxDropoutGradImpl(hipStream_t stream, miopenHandle_t miopen_handle, T* input_grad_data,\n const T* output_grad_data, const bool* mask_data, const T* softmax_output_data,\n int element_count, int batch_count, const float ratio);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SoftmaxGrad final : public CudaKernel {\n public:\n SoftmaxGrad(const OpKernelInfo& info) : CudaKernel{info} {\n const auto& op_type = info.node().OpType();\n is_since_opset_13_ = (op_type == \"SoftmaxGrad_13\" || op_type == \"LogSoftmaxGrad_13\");\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(is_since_opset_13_ ? -1 : 1));\n is_log_softmax_ = (op_type == \"LogSoftmaxGrad\" || op_type == \"LogSoftmaxGrad_13\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n bool is_log_softmax_;\n bool is_since_opset_13_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SoftmaxGrad final : public RocmKernel {\n public:\n SoftmaxGrad(const OpKernelInfo& info) : RocmKernel{info} {\n const auto& op_type = info.node().OpType();\n is_since_opset_13_ = (op_type == \"SoftmaxGrad_13\" || op_type == \"LogSoftmaxGrad_13\");\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(is_since_opset_13_ ? 
-1 : 1));\n is_log_softmax_ = (op_type == \"LogSoftmaxGrad\" || op_type == \"LogSoftmaxGrad_13\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n bool is_log_softmax_;\n bool is_since_opset_13_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nStatus SoftmaxGradImpl(cudaStream_t stream, cudnnHandle_t cudnn_handle, T* input_grad, const T* output_grad,\n const T* softmax_output, int element_count, int batch_count, bool is_log_softmax);\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nStatus SoftmaxGradImpl(hipStream_t stream, miopenHandle_t miopen_handle, T* input_grad, const T* output_grad,\n const T* softmax_output, int element_count, int batch_count, bool is_log_softmax);\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass DropoutGrad final : public CudaKernel {\n public:\n DropoutGrad(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass DropoutGrad final : public RocmKernel {\n public:\n DropoutGrad(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid DropoutGradientKernelImpl(cudaStream_t stream, const int64_t N, const T* dY_data, const void* mask_data,\n const float ratio, T* dX_data, bool use_bitmask);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid DropoutGradientKernelImpl(hipStream_t stream, const int64_t N, const T* dY_data, const void* mask_data,\n const float ratio, T* dX_data, bool use_bitmask);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nclass LayerNormGrad final : public CudaKernel {\n public:\n LayerNormGrad(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t axis_;\n};\n\ntemplate \nclass InvertibleLayerNormGrad final : public CudaKernel {\n public:\n InvertibleLayerNormGrad(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " #pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nclass LayerNormGrad final : public RocmKernel {\n public:\n LayerNormGrad(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t axis_;\n};\n\ntemplate \nclass InvertibleLayerNormGrad final : public RocmKernel {\n public:\n InvertibleLayerNormGrad(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n//\n// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.\n// NVIDIA/apex is licensed under the\n// BSD 3 - Clause \"New\" or \"Revised\" License\n//\n\n/* Modifications Copyright (c) Microsoft. 
*/\n\n#pragma once\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid HostLayerNormGradient(\n const cudaDeviceProp& prop,\n cudaStream_t stream,\n const V* dout,\n const T* input,\n const V* output,\n const V* gamma,\n const V* beta,\n const U* mean,\n const U* invvar,\n int64_t n1,\n int64_t n2,\n T* grad_input,\n V* grad_gamma,\n V* grad_beta,\n U* part_grad_gamma,\n U* part_grad_beta,\n const int part_size);\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " /**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n//\n// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.\n// NVIDIA/apex is licensed under the\n// BSD 3 - Clause \"New\" or \"Revised\" License\n//\n\n/* Modifications Copyright (c) Microsoft. */\n\n#pragma once\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid HostLayerNormGradient(\n const hipDeviceProp_t& prop,\n hipStream_t stream,\n const V* dout,\n const T* input,\n const V* output,\n const V* gamma,\n const V* beta,\n const U* mean,\n const U* invvar,\n int64_t n1,\n int64_t n2,\n T* grad_input,\n V* grad_gamma,\n V* grad_beta,\n U* part_grad_gamma,\n U* part_grad_beta,\n const int part_size);\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nclass AdamOptimizer final : public CudaKernel {\n public:\n AdamOptimizer(const OpKernelInfo& info) : CudaKernel(info) {\n info.GetAttrOrDefault(\"alpha\", &alpha_, 0.9f);\n info.GetAttrOrDefault(\"beta\", &beta_, 0.999f);\n info.GetAttrOrDefault(\"lambda\", &lambda_, 0.0f);\n info.GetAttrOrDefault(\"epsilon\", &epsilon_, 1e-8f);\n info.GetAttrOrDefault(\"max_norm_clip\", &max_norm_clip_, 1.0f);\n\n int64_t tmp_flag = static_cast(0);\n ORT_ENFORCE(info.GetAttr(\"do_bias_correction\", &tmp_flag).IsOK(), \"Missing/Invalid do_bias_correction\");\n ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, \"do_bias_correction must be either 0 or 1.\");\n ORT_ENFORCE(max_norm_clip_ != 0, \"max_norm_clip must NOT be 0.\");\n do_bias_correction_ = tmp_flag != 0 ? true : false;\n info.GetAttrOrDefault(\"weight_decay_mode\", &weight_decay_mode_, static_cast(0));\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n float alpha_;\n float beta_;\n float lambda_;\n float epsilon_;\n float max_norm_clip_;\n bool do_bias_correction_;\n int64_t weight_decay_mode_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>\nclass AdamOptimizer final : public RocmKernel {\n public:\n AdamOptimizer(const OpKernelInfo& info) : RocmKernel(info) {\n info.GetAttrOrDefault(\"alpha\", &alpha_, 0.9f);\n info.GetAttrOrDefault(\"beta\", &beta_, 0.999f);\n info.GetAttrOrDefault(\"lambda\", &lambda_, 0.0f);\n info.GetAttrOrDefault(\"epsilon\", &epsilon_, 1e-8f);\n info.GetAttrOrDefault(\"max_norm_clip\", &max_norm_clip_, 1.0f);\n\n int64_t tmp_flag = static_cast<int64_t>(0);\n ORT_ENFORCE(info.GetAttr(\"do_bias_correction\", &tmp_flag).IsOK(), \"Missing/Invalid do_bias_correction\");\n ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, \"do_bias_correction must be either 0 or 1.\");\n ORT_ENFORCE(max_norm_clip_ != 0, \"max_norm_clip must NOT be 0.\");\n do_bias_correction_ = tmp_flag != 0 ? true : false;\n info.GetAttrOrDefault(\"weight_decay_mode\", &weight_decay_mode_, static_cast<int64_t>(0));\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n float alpha_;\n float beta_;\n float lambda_;\n float epsilon_;\n float max_norm_clip_;\n bool do_bias_correction_;\n int64_t weight_decay_mode_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>\nvoid AdamOptimizerImpl(\n cudaStream_t stream,\n const T1* eta,\n const T2 update_count,\n const T3* weights,\n const T_GRAD* grads,\n const T4* moment_1,\n const T4* moment_2,\n const T3* loss_scale,\n const T_GRAD_NORM* grad_norm,\n const float alpha,\n const float beta,\n const float lambda,\n const float epsilon,\n const float max_norm,\n const bool do_bias_correction,\n const int64_t weight_decay_mode,\n T4* moment_1_out,\n T4* moment_2_out,\n T3* weights_out,\n T_GRAD* grads_out,\n T_MIXED_PRECISION_FP* mixed_precision_weights_out,\n size_t count);\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid AdamOptimizerImpl(\n hipStream_t stream,\n const T1* eta,\n const T2 update_count,\n const T3* weights,\n const T_GRAD* grads,\n const T4* moment_1,\n const T4* moment_2,\n const T3* loss_scale,\n const T_GRAD_NORM* grad_norm,\n const float alpha,\n const float beta,\n const float lambda,\n const float epsilon,\n const float max_norm,\n const bool do_bias_correction,\n const int64_t weight_decay_mode,\n T4* moment_1_out,\n T4* moment_2_out,\n T3* weights_out,\n T_GRAD* grads_out,\n T_MIXED_PRECISION_FP* mixed_precision_weights_out,\n size_t count);\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \n#include \n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/training_ops/cuda/optimizer/adamw/adamw.h\"\n#include \"orttraining/training_ops/cuda/optimizer/adamw/adamw_impl.h\"\n#include \"orttraining/training_ops/cuda/optimizer/common.h\"\nnamespace onnxruntime {\nnamespace cuda {\nONNX_OPERATOR_KERNEL_EX(\n AdamWOptimizer, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .InputMemoryType(OrtMemTypeCPUInput, 6)\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .Alias(2, 1) \n .Alias(4, 2) \n .Alias(5, 3) \n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T2\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"S_WEIGHT\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"S_GRAD\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"S_MOMENT\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), AdamWOptimizer);\nStatus AdamWOptimizer::ComputeInternal(OpKernelContext* ctx) const {\n AdamWOptimizerBase::Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));\n int64_t* updated_flag_ptr = p.updated_flag->template MutableData();\n \n const Tensor* update_signal = ctx->Input(6);\n if (update_signal == nullptr || *update_signal->template Data()) {\n typedef typename ToCudaType::MappedType CudaT_FLOAT;\n typedef AdamWMTAFunctor TFunctor;\n TFunctor functor;\n const float* lr_ptr = p.learning_rate->template Data();\n const int64_t* step_ptr = p.step->template Data();\n ORT_ENFORCE(lr_ptr && step_ptr);\n launch_multi_tensor_functor(\n Stream(ctx), MTA_ADAMW_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor, alpha_, beta_, epsilon_, *lr_ptr, weight_decay_, adam_mode_, correct_bias_, *step_ptr);\n *updated_flag_ptr = 1;\n } else {\n *updated_flag_ptr = 0;\n }\n if (p.updated_weights != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));\n }\n if (p.updated_momentums_1 != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.momentums_1, p.updated_momentums_1));\n }\n if (p.updated_momentums_2 != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.momentums_2, p.updated_momentums_2));\n }\n return Status::OK();\n}\n} \n} \n\n###", "hip": " \n\n#include \n#include \n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/training_ops/rocm/optimizer/adamw/adamw.h\"\n#include \"orttraining/training_ops/rocm/optimizer/adamw/adamw_impl.h\"\n#include \"orttraining/training_ops/rocm/optimizer/common.h\"\nnamespace onnxruntime {\nnamespace rocm {\nONNX_OPERATOR_KERNEL_EX(\n 
AdamWOptimizer, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .InputMemoryType(OrtMemTypeCPUInput, 6)\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .Alias(2, 1) \n .Alias(4, 2) \n .Alias(5, 3) \n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T2\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"S_WEIGHT\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"S_GRAD\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"S_MOMENT\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), AdamWOptimizer);\nStatus AdamWOptimizer::ComputeInternal(OpKernelContext* ctx) const {\n AdamWOptimizerBase::Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));\n int64_t* updated_flag_ptr = p.updated_flag->template MutableData();\n \n const Tensor* update_signal = ctx->Input(6);\n if (update_signal == nullptr || *update_signal->template Data()) {\n typedef typename ToHipType::MappedType HipT_FLOAT;\n typedef AdamWMTAFunctor TFunctor;\n TFunctor functor;\n const float* lr_ptr = p.learning_rate->template Data();\n const int64_t* step_ptr = p.step->template Data();\n ORT_ENFORCE(lr_ptr && step_ptr);\n launch_multi_tensor_functor(\n Stream(ctx), MTA_ADAMW_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor, alpha_, beta_, epsilon_, *lr_ptr, weight_decay_, adam_mode_, correct_bias_, *step_ptr);\n *updated_flag_ptr = 1;\n } else {\n *updated_flag_ptr = 0;\n }\n if (p.updated_weights != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));\n }\n if (p.updated_momentums_1 != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.momentums_1, p.updated_momentums_1));\n }\n if (p.updated_momentums_2 != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.momentums_2, p.updated_momentums_2));\n }\n return Status::OK();\n}\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nclass MatMul final : public CudaKernel {\n using Base = CudaKernel;\n\n public:\n MatMul(const OpKernelInfo& info)\n : CudaKernel(info),\n alpha_{info.GetAttrOrDefault(\"alpha\", 1.0f)},\n trans_A_{info.GetAttrOrDefault(\"transA\", 0) != 0},\n trans_B_{info.GetAttrOrDefault(\"transB\", 0) != 0},\n trans_batch_a_{info.GetAttrOrDefault(\"transBatchA\", 0) != 0},\n trans_batch_b_{info.GetAttrOrDefault(\"transBatchB\", 0) != 0} {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n const float alpha_;\n const bool trans_A_;\n const bool trans_B_;\n const bool trans_batch_a_;\n const bool trans_batch_b_;\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nclass MatMul final : public RocmKernel {\n using Base = RocmKernel;\n\n public:\n MatMul(const OpKernelInfo& info)\n : RocmKernel(info),\n alpha_{info.GetAttrOrDefault(\"alpha\", 1.0f)},\n trans_A_{info.GetAttrOrDefault(\"transA\", 0) != 0},\n trans_B_{info.GetAttrOrDefault(\"transB\", 0) != 0},\n trans_batch_a_{info.GetAttrOrDefault(\"transBatchA\", 0) != 0},\n trans_batch_b_{info.GetAttrOrDefault(\"transBatchB\", 0) != 0} {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n const float alpha_;\n const bool trans_A_;\n const bool trans_B_;\n const bool trans_batch_a_;\n const bool trans_batch_b_;\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"orttraining/training_ops/cpu/optimizer/adamw/adamwbase.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass AdamWOptimizer final : public CudaKernel, public contrib::AdamWOptimizerBase {\n public:\n AdamWOptimizer(const OpKernelInfo& info) : CudaKernel(info), contrib::AdamWOptimizerBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"orttraining/training_ops/cpu/optimizer/adamw/adamwbase.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass AdamWOptimizer final : public RocmKernel, public contrib::AdamWOptimizerBase {\n public:\n AdamWOptimizer(const OpKernelInfo& info) : RocmKernel(info), contrib::AdamWOptimizerBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#define MTA_ADAMW_GROUP_SIZE 4\n#define MTA_ADAMW_CHUNK_SIZE 2048 * 32\n\ntemplate \nstruct AdamWMTAFunctor {\n void operator()(cudaStream_t stream,\n ChunkGroup chunks,\n const float alpha,\n const float beta,\n const float epsilon,\n const float lr,\n const float decay,\n const int64_t adam_mode,\n const int64_t correct_bias,\n const int64_t update_count);\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#define MTA_ADAMW_GROUP_SIZE 4\n#define MTA_ADAMW_CHUNK_SIZE 2048 * 32\n\ntemplate \nstruct AdamWMTAFunctor {\n void operator()(hipStream_t stream,\n ChunkGroup chunks,\n const float alpha,\n const float beta,\n const float epsilon,\n const float lr,\n const float decay,\n const int64_t adam_mode,\n const int64_t correct_bias,\n const int64_t update_count);\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass InplaceClipGradNorm final : public CudaKernel {\n public:\n InplaceClipGradNorm(const OpKernelInfo& info) : CudaKernel(info) {\n info.GetAttrOrDefault(\"max_norm\", &max_norm_, 1.0f);\n info.GetAttrOrDefault(\"norm_type\", &norm_type_, std::string(\"fro\"));\n ORT_ENFORCE(norm_type_ == \"fro\", \"Given norm type \", norm_type_, \" is not supported for InplaceClipGradNorm.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n float max_norm_;\n std::string norm_type_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass InplaceClipGradNorm final : public RocmKernel {\n public:\n InplaceClipGradNorm(const OpKernelInfo& info) : RocmKernel(info) {\n info.GetAttrOrDefault(\"max_norm\", &max_norm_, 1.0f);\n info.GetAttrOrDefault(\"norm_type\", &norm_type_, std::string(\"fro\"));\n ORT_ENFORCE(norm_type_ == \"fro\", \"Given norm type \", norm_type_, \" is not supported for InplaceClipGradNorm.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n float max_norm_;\n std::string norm_type_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \"core/providers/cuda/cuda_common.h\"\n#include \"orttraining/training_ops/cuda/optimizer/clip_grad_norm/clip_grad_norm_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void ClipGradNorm(\n ChunkGroup chunks,\n const float* total_norm,\n const float epsilon,\n const float max_norm) {\n const int tensor_idx = chunks.block_index_to_tensor_group_index[blockIdx.x];\n const int tensor_size = chunks.tensor_sizes[tensor_idx];\n\n const int chunk_start_idx = chunks.block_index_to_chunk_start_index[blockIdx.x];\n // chunk_size is chunks.chunk_size if the loaded chunk is full. 
Otherwise (this\n // chunk is the last one in the source tensor), the actual size is determined\n // by the bound of the source tensor.\n const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;\n\n T* gradients_chunk_ptr = static_cast(chunks.tensor_ptrs[0][tensor_idx]) + chunk_start_idx;\n\n#pragma unroll 4\n for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {\n float clip_coefficient = max_norm / (*total_norm + epsilon);\n gradients_chunk_ptr[i] = static_cast(gradients_chunk_ptr[i]) *\n static_cast(fminf(clip_coefficient, 1.0f));\n }\n}\n\ntemplate \nvoid ClipGradNormFunctor::operator()(\n cudaStream_t stream,\n ChunkGroup chunks,\n const float* total_norm,\n const float epsilon,\n const float max_norm) {\n const int num_blocks_per_grid = chunks.chunk_count;\n const int num_threads_per_block = ChunkGroup::thread_count_per_block;\n\n ClipGradNorm<<>>(chunks, total_norm, epsilon, max_norm);\n}\n\n#define SPECIALIZE_CLIPGRADNORM_FUNCTOR(T) \\\n template void ClipGradNormFunctor::operator()(cudaStream_t stream, \\\n ChunkGroup chunks, \\\n const float* total_norm, \\\n const float epsilon, \\\n const float max_norm); \\\n \\\n template __global__ void ClipGradNorm(ChunkGroup chunks, \\\n const float* total_norm, \\\n const float epsilon, \\\n const float max_norm);\n\nSPECIALIZE_CLIPGRADNORM_FUNCTOR(float);\n\n#undef SPECIALIZE_CLIPGRADNORM_FUNCTOR\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \"core/providers/rocm/rocm_common.h\"\n#include \"orttraining/training_ops/rocm/optimizer/clip_grad_norm/clip_grad_norm_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void ClipGradNorm(\n ChunkGroup chunks,\n const float* total_norm,\n const float epsilon,\n const float max_norm) {\n const int tensor_idx = chunks.block_index_to_tensor_group_index[blockIdx.x];\n const int tensor_size = chunks.tensor_sizes[tensor_idx];\n\n const int chunk_start_idx = chunks.block_index_to_chunk_start_index[blockIdx.x];\n // chunk_size is chunks.chunk_size if the loaded chunk is full. 
Otherwise (this\n // chunk is the last one in the source tensor), the actual size is determined\n // by the bound of the source tensor.\n const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;\n\n T* gradients_chunk_ptr = static_cast(chunks.tensor_ptrs[0][tensor_idx]) + chunk_start_idx;\n\n#pragma unroll 4\n for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {\n float clip_coefficient = max_norm / (*total_norm + epsilon);\n gradients_chunk_ptr[i] = static_cast(gradients_chunk_ptr[i]) *\n static_cast(fminf(clip_coefficient, 1.0f));\n }\n}\n\ntemplate \nvoid ClipGradNormFunctor::operator()(\n hipStream_t stream,\n ChunkGroup chunks,\n const float* total_norm,\n const float epsilon,\n const float max_norm) {\n const int num_blocks_per_grid = chunks.chunk_count;\n const int num_threads_per_block = ChunkGroup::thread_count_per_block;\n\n ClipGradNorm<<>>(chunks, total_norm, epsilon, max_norm);\n}\n\n#define SPECIALIZE_CLIPGRADNORM_FUNCTOR(T) \\\n template void ClipGradNormFunctor::operator()(hipStream_t stream, \\\n ChunkGroup chunks, \\\n const float* total_norm, \\\n const float epsilon, \\\n const float max_norm); \\\n \\\n template __global__ void ClipGradNorm(ChunkGroup chunks, \\\n const float* total_norm, \\\n const float epsilon, \\\n const float max_norm);\n\nSPECIALIZE_CLIPGRADNORM_FUNCTOR(float);\n\n#undef SPECIALIZE_CLIPGRADNORM_FUNCTOR\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nconstexpr int ClipGradNormGroupSize = 1;\n\ntemplate \nstruct ClipGradNormFunctor {\n void operator()(cudaStream_t stream,\n ChunkGroup chunks,\n const float* l2_norm,\n const float epsilon,\n const float max_norm);\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nconstexpr int ClipGradNormGroupSize = 1;\n\ntemplate \nstruct ClipGradNormFunctor {\n void operator()(hipStream_t stream,\n ChunkGroup chunks,\n const float* l2_norm,\n const float epsilon,\n const float max_norm);\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/optimizer/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus CopyIfNotSameCUDABuffer(OpKernelContext* ctx, size_t number_of_values,\n const TensorSeq* values, TensorSeq* updated_values) {\n if (values != updated_values) {\n AllocatorPtr alloc;\n ORT_ENFORCE(ctx->GetTempSpaceAllocator(&alloc).IsOK(),\n \"CUDA CopyIfNotSameBuffer for tensor sequence: Unable to get an allocator.\");\n cudaStream_t cuda_stream = ctx->GetComputeStream()\n ? 
static_cast(ctx->GetComputeStream()->GetHandle())\n : nullptr;\n\n updated_values->SetType(values->DataType());\n updated_values->Reserve(number_of_values);\n for (size_t input_idx = 0; input_idx < number_of_values; ++input_idx) {\n const Tensor& source_tensor = values->Get(input_idx);\n std::unique_ptr target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);\n\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target_tensor->MutableDataRaw(),\n source_tensor.DataRaw(),\n source_tensor.SizeInBytes(),\n cudaMemcpyDeviceToDevice, cuda_stream));\n\n updated_values->Add(std::move(*target_tensor)); // Add will check for type consistency\n }\n }\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/optimizer/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus CopyIfNotSameROCMBuffer(OpKernelContext* ctx, size_t number_of_values,\n const TensorSeq* values, TensorSeq* updated_values) {\n if (values != updated_values) {\n AllocatorPtr alloc;\n ORT_ENFORCE(ctx->GetTempSpaceAllocator(&alloc).IsOK(),\n \"ROCM CopyIfNotSameBuffer for tensor sequence: Unable to get an allocator.\");\n hipStream_t hip_stream = ctx->GetComputeStream()\n ? static_cast(ctx->GetComputeStream()->GetHandle())\n : nullptr;\n\n updated_values->SetType(values->DataType());\n updated_values->Reserve(number_of_values);\n for (size_t input_idx = 0; input_idx < number_of_values; ++input_idx) {\n const Tensor& source_tensor = values->Get(input_idx);\n std::unique_ptr target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);\n\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(target_tensor->MutableDataRaw(),\n source_tensor.DataRaw(),\n source_tensor.SizeInBytes(),\n hipMemcpyDeviceToDevice, hip_stream));\n\n updated_values->Add(std::move(*target_tensor)); // Add will check for type consistency\n }\n }\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\n// ---------------------------------------------------------------------------\n// _ComputeGradScale -- helper to calculate gradient scales based on global norms\n// ---------------------------------------------------------------------------\n\ntemplate \n__device__ __forceinline__ TFinalScale _ComputeGradScale(\n const TLossScale* loss_scale, // Scale of the gradient (called \"scaled_g_norm\" below)\n const TGradNorm* scaled_g_norm, // Scaled gradient norm is an optimizer input\n const TFinalScale max_g_norm) {\n const TFinalScale scale = loss_scale != nullptr ? TFinalScale(*loss_scale) : TFinalScale(1.f);\n const TFinalScale scaled_max_g_norm = TFinalScale(scale * max_g_norm);\n\n // This number is used to divide the scaled gradient before applying optimizers.\n TFinalScale scaled_g_scaling_factor = scale;\n if (scaled_g_norm != nullptr && TFinalScale(*scaled_g_norm) > scaled_max_g_norm) {\n scaled_g_scaling_factor = TFinalScale(*scaled_g_norm) / max_g_norm;\n }\n return scaled_g_scaling_factor;\n}\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\n// ---------------------------------------------------------------------------\n// _ComputeGradScale -- helper to calculate gradient scales based on global norms\n// ---------------------------------------------------------------------------\n\ntemplate \n__device__ __forceinline__ TFinalScale _ComputeGradScale(\n const TLossScale* loss_scale, // Scale of the gradient (called \"scaled_g_norm\" below)\n const TGradNorm* scaled_g_norm, // Scaled gradient norm is an optimizer input\n const TFinalScale max_g_norm) {\n const TFinalScale scale = loss_scale != nullptr ? TFinalScale(*loss_scale) : TFinalScale(1.f);\n const TFinalScale scaled_max_g_norm = TFinalScale(scale * max_g_norm);\n\n // This number is used to divide the scaled gradient before applying optimizers.\n TFinalScale scaled_g_scaling_factor = scale;\n if (scaled_g_norm != nullptr && TFinalScale(*scaled_g_norm) > scaled_max_g_norm) {\n scaled_g_scaling_factor = TFinalScale(*scaled_g_norm) / max_g_norm;\n }\n return scaled_g_scaling_factor;\n}\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus CopyIfNotSameBuffer(cudaStream_t stream, const Tensor& source_tensor, Tensor& target_tensor) {\n const T* source = source_tensor.template Data();\n T* target = target_tensor.template MutableData();\n if (target != source) {\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, source_tensor.SizeInBytes(), cudaMemcpyDeviceToDevice,\n stream));\n }\n return Status::OK();\n}\n\nStatus CopyIfNotSameCUDABuffer(OpKernelContext* ctx, size_t number_of_values, const TensorSeq* values,\n TensorSeq* updated_values);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus CopyIfNotSameBuffer(hipStream_t stream, const Tensor& source_tensor, Tensor& target_tensor) {\n const T* source = source_tensor.template Data();\n T* target = target_tensor.template MutableData();\n if (target != source) {\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, source_tensor.SizeInBytes(), hipMemcpyDeviceToDevice,\n stream));\n }\n return Status::OK();\n}\n\nStatus CopyIfNotSameROCMBuffer(OpKernelContext* ctx, size_t number_of_values, const TensorSeq* values,\n TensorSeq* updated_values);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass ZeroGradient final : public CudaKernel {\n public:\n ZeroGradient(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass InPlaceAccumulator final : public CudaKernel {\n public:\n InPlaceAccumulator(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass InPlaceAccumulatorV2 final : public CudaKernel {\n public:\n InPlaceAccumulatorV2(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass ZeroGradient final : public RocmKernel {\n public:\n ZeroGradient(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass InPlaceAccumulator final : public RocmKernel {\n public:\n InPlaceAccumulator(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\ntemplate \nclass InPlaceAccumulatorV2 final : public RocmKernel {\n public:\n InPlaceAccumulatorV2(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n// Implementation can be found in cuda file\ntemplate \nvoid InPlaceAccumulatorImpl(\n cudaStream_t stream,\n const T* gradient_buffer,\n const T_GRAD* gradient,\n T* accumulated_gradient,\n size_t count);\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n// Implementation can be found in rocm file\ntemplate \nvoid InPlaceAccumulatorImpl(\n hipStream_t stream,\n const T* gradient_buffer,\n const T_GRAD* gradient,\n T* accumulated_gradient,\n size_t count);\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"matmul_integer.h\"\n#include \"core/providers/cpu/math/matmul_helper.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus ReduceRowSumOnMatrixA(cudaStream_t stream, const int8_t* matrix, int32_t* row_sum, const int8_t offset, const MatMulComputeHelper& helper);\nStatus ReduceColSumOnMatrixB(cudaStream_t stream, const int8_t* matrix, int32_t* col_sum, const int8_t offset, const MatMulComputeHelper& helper);\nStatus OffsetOutput(cudaStream_t stream,\n const int32_t* row_sum,\n const int32_t* col_sum,\n int32_t* output,\n const int8_t a_offset,\n const int8_t b_offset,\n const MatMulComputeHelper& helper);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"matmul_integer.h\"\n#include \"core/providers/cpu/math/matmul_helper.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus ReduceRowSumOnMatrixA(hipStream_t stream, const int8_t* matrix, int32_t* row_sum, const int8_t offset, const MatMulComputeHelper& helper);\nStatus ReduceColSumOnMatrixB(hipStream_t stream, const int8_t* matrix, int32_t* col_sum, const int8_t offset, const MatMulComputeHelper& helper);\nStatus OffsetOutput(hipStream_t stream,\n const int32_t* row_sum,\n const int32_t* col_sum,\n int32_t* output,\n const int8_t a_offset,\n const int8_t b_offset,\n const MatMulComputeHelper& helper);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass LambOptimizer final : public CudaKernel {\n public:\n LambOptimizer(const OpKernelInfo& info) : CudaKernel(info) {\n alpha_ = info.GetAttrsOrDefault(\"alpha\", std::vector(1024, 0.9f));\n beta_ = info.GetAttrsOrDefault(\"beta\", std::vector(1024, 0.999f));\n lambda_ = info.GetAttrsOrDefault(\"lambda\", std::vector(1024, 0.0f));\n epsilon_ = info.GetAttrsOrDefault(\"epsilon\", std::vector(1024, 1e-6f));\n max_norm_clip_ = info.GetAttrsOrDefault(\"max_norm_clip\", std::vector(1024, 1.0f));\n ORT_ENFORCE(info.GetAttr(\"ratio_min\", &ratio_min_).IsOK(), \"Missing/Invalid 'ratio_min' attribute value\");\n ORT_ENFORCE(info.GetAttr(\"ratio_max\", &ratio_max_).IsOK(), \"Missing/Invalid 'ratio_max' attribute value\");\n for (const auto& max_norm : max_norm_clip_) {\n ORT_ENFORCE(max_norm != 0, \"max_norm_clip must NOT be 0.\");\n }\n\n int64_t tmp_flag = static_cast(0);\n ORT_ENFORCE(info.GetAttr(\"do_bias_correction\", &tmp_flag).IsOK(), \"Missing/Invalid do_bias_correction\");\n ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, \"do_bias_correction must be either 0 or 1.\");\n do_bias_correction_ = tmp_flag != 0 ? 
true : false;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n std::vector alpha_;\n std::vector beta_;\n std::vector lambda_;\n std::vector epsilon_;\n std::vector max_norm_clip_;\n float ratio_min_;\n float ratio_max_;\n bool do_bias_correction_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass LambOptimizer final : public RocmKernel {\n public:\n LambOptimizer(const OpKernelInfo& info) : RocmKernel(info) {\n alpha_ = info.GetAttrsOrDefault(\"alpha\", std::vector(1024, 0.9f));\n beta_ = info.GetAttrsOrDefault(\"beta\", std::vector(1024, 0.999f));\n lambda_ = info.GetAttrsOrDefault(\"lambda\", std::vector(1024, 0.0f));\n epsilon_ = info.GetAttrsOrDefault(\"epsilon\", std::vector(1024, 1e-6f));\n max_norm_clip_ = info.GetAttrsOrDefault(\"max_norm_clip\", std::vector(1024, 1.0f));\n ORT_ENFORCE(info.GetAttr(\"ratio_min\", &ratio_min_).IsOK(), \"Missing/Invalid 'ratio_min' attribute value\");\n ORT_ENFORCE(info.GetAttr(\"ratio_max\", &ratio_max_).IsOK(), \"Missing/Invalid 'ratio_max' attribute value\");\n for (const auto& max_norm : max_norm_clip_) {\n ORT_ENFORCE(max_norm != 0, \"max_norm_clip must NOT be 0.\");\n }\n\n int64_t tmp_flag = static_cast(0);\n ORT_ENFORCE(info.GetAttr(\"do_bias_correction\", &tmp_flag).IsOK(), \"Missing/Invalid do_bias_correction\");\n ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, \"do_bias_correction must be either 0 or 1.\");\n do_bias_correction_ = tmp_flag != 0 ? true : false;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n std::vector alpha_;\n std::vector beta_;\n std::vector lambda_;\n std::vector epsilon_;\n std::vector max_norm_clip_;\n float ratio_min_;\n float ratio_max_;\n bool do_bias_correction_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#pragma once\n#include \n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/multi_tensor/common.cuh\"\n#include \"core/framework/stream_handles.h\"\nnamespace onnxruntime {\nnamespace cuda {\n\n\n\n\ntemplate \nvoid LambComputeDirection(\n cudaStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count);\n\n\n\n\ntemplate \nvoid LambUpdate(\n cudaStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count);\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct LambMultiTensorComputeDirectionFunctor {\n void operator()(\n cudaStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* grad_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction);\n};\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct LambMultiTensorReductionFunctor {\n void operator()(\n cudaStream_t stream, ChunkGroup<4> chunk_group, 
const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size, onnxruntime::Stream* ort_stream);\n};\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nstruct LambMultiTensorSyncRangeAndLock {\n int leading_block;\n int number_blocks;\n int completed_blocks;\n};\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct LambMultiTensorUpdateFunctor {\n void operator()(\n cudaStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max);\n};\n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/multi_tensor/common.cuh\"\n#include \"core/framework/stream_handles.h\"\nnamespace onnxruntime {\nnamespace rocm {\n\n\n\n\ntemplate \nvoid LambComputeDirection(\n hipStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count);\n\n\n\n\ntemplate \nvoid LambUpdate(\n hipStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count);\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct LambMultiTensorComputeDirectionFunctor {\n void operator()(\n hipStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* grad_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction);\n};\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct LambMultiTensorReductionFunctor {\n void operator()(\n hipStream_t stream, ChunkGroup<4> chunk_group, const RocmKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size, onnxruntime::Stream* ort_stream);\n};\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nstruct LambMultiTensorSyncRangeAndLock {\n int leading_block;\n int number_blocks;\n int completed_blocks;\n};\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ntemplate \nstruct LambMultiTensorUpdateFunctor {\n void operator()(\n hipStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max);\n};\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"sg.h\"\n#include \"sg_impl.h\"\n\n#include \"core/providers/cuda/reduction/reduction_functions.h\"\n#include \"core/providers/cuda/math/binary_elementwise_ops.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n SGDOptimizer,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(1, 0) // Update weights in-place\n .Alias(2, 1) // Update gradients in-place\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()),\n SGDOptimizer);\n\nStatus SGDOptimizer::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor& ETA = *ctx->Input(0);\n const Tensor& W = *ctx->Input(1);\n const Tensor& G = *ctx->Input(2);\n Tensor* NW = ctx->Output(0, W.Shape());\n Tensor* NG = ctx->Output(1, G.Shape());\n\n ORT_ENFORCE(W.Shape() == G.Shape());\n\n SGDOptimizerImpl(\n Stream(ctx),\n ETA.template Data(),\n W.template Data(),\n G.template Data(),\n NW != nullptr ? NW->template MutableData() : nullptr,\n NG != nullptr ? 
NG->template MutableData() : nullptr,\n W.Shape().Size());\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"sg.h\"\n#include \"sg_impl.h\"\n\n#include \"core/providers/rocm/reduction/reduction_functions.h\"\n#include \"core/providers/rocm/math/binary_elementwise_ops.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n SGDOptimizer,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(1, 0) // Update weights in-place\n .Alias(2, 1) // Update gradients in-place\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()),\n SGDOptimizer);\n\nStatus SGDOptimizer::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor& ETA = *ctx->Input(0);\n const Tensor& W = *ctx->Input(1);\n const Tensor& G = *ctx->Input(2);\n Tensor* NW = ctx->Output(0, W.Shape());\n Tensor* NG = ctx->Output(1, G.Shape());\n\n ORT_ENFORCE(W.Shape() == G.Shape());\n\n SGDOptimizerImpl(\n Stream(ctx),\n ETA.template Data(),\n W.template Data(),\n G.template Data(),\n NW != nullptr ? NW->template MutableData() : nullptr,\n NG != nullptr ? NG->template MutableData() : nullptr,\n W.Shape().Size());\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SGDOptimizer final : public CudaKernel {\n public:\n SGDOptimizer(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SGDOptimizer final : public RocmKernel {\n public:\n SGDOptimizer(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"sg_impl.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"core/providers/cuda/atomic/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _SGDOptimizer(\n const T* eta,\n const T* weights,\n const T* gradients,\n T* weights_out,\n T* gradients_out,\n CUDA_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n const T delta = -(*eta) * gradients[id];\n\n if (gradients_out) {\n gradients_out[id] = delta;\n }\n if (weights_out) {\n weights_out[id] = weights[id] + delta;\n }\n}\n\ntemplate \nvoid SGDOptimizerImpl(\n cudaStream_t stream,\n const T* eta,\n const T* weights,\n const T* gradients,\n T* weights_out,\n T* gradients_out,\n size_t count) {\n int blocksPerGrid = (int)(ceil(static_cast(count) / GridDim::maxThreadsPerBlock));\n CUDA_LONG N = static_cast(count);\n _SGDOptimizer<<>>(\n eta,\n weights,\n gradients,\n weights_out,\n gradients_out,\n N);\n}\n\n#define SPECIALIZED_IMPL__SGDOptimizerImpl(T) \\\n template void SGDOptimizerImpl( \\\n cudaStream_t stream, \\\n const T* eta, \\\n const T* weights, \\\n const T* gradients, \\\n T* weights_out, \\\n T* gradients_out, \\\n size_t count);\n\nSPECIALIZED_IMPL__SGDOptimizerImpl(float)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"sg_impl.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"core/providers/rocm/atomic/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _SGDOptimizer(\n const T* eta,\n const T* weights,\n const T* gradients,\n T* weights_out,\n T* gradients_out,\n HIP_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n const T delta = -(*eta) * gradients[id];\n\n if (gradients_out) {\n gradients_out[id] = delta;\n }\n if (weights_out) {\n weights_out[id] = weights[id] + delta;\n }\n}\n\ntemplate \nvoid SGDOptimizerImpl(\n hipStream_t stream,\n const T* eta,\n const T* weights,\n const T* gradients,\n T* weights_out,\n T* gradients_out,\n size_t count) {\n int blocksPerGrid = (int)(ceil(static_cast(count) / GridDim::maxThreadsPerBlock));\n HIP_LONG N = static_cast(count);\n _SGDOptimizer<<>>(\n eta,\n weights,\n gradients,\n weights_out,\n gradients_out,\n N);\n}\n\n#define SPECIALIZED_IMPL__SGDOptimizerImpl(T) \\\n template void SGDOptimizerImpl( \\\n hipStream_t stream, \\\n const T* eta, \\\n const T* weights, \\\n const T* gradients, \\\n T* weights_out, \\\n T* gradients_out, \\\n size_t count);\n\nSPECIALIZED_IMPL__SGDOptimizerImpl(float)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nvoid SGDOptimizerImpl(\n cudaStream_t stream,\n const T* eta,\n const T* weights,\n const T* gradients,\n T* weight_out,\n T* gradients_out,\n size_t count);\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nvoid SGDOptimizerImpl(\n hipStream_t stream,\n const T* eta,\n const T* weights,\n const T* gradients,\n T* weight_out,\n T* gradients_out,\n size_t count);\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/optimizer/common.h\"\n#include \"orttraining/training_ops/cuda/optimizer/sgd/sgd.h\"\n#include \"orttraining/training_ops/cuda/optimizer/sgd/sgd_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n SGDOptimizerV2,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 3)\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .Alias(1, 1) // Update weights in-place\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T_BOOL\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"S_WEIGHT\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"S_GRAD\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n SGDOptimizerV2);\n\nStatus SGDOptimizerV2::ComputeInternal(OpKernelContext* ctx) const {\n SGDOptimizerV2Base::Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));\n\n bool* updated_flag_ptr = p.update_completed->template MutableData();\n\n // Currently placed on CPU, need revisit when we had mixed precision training requirement.\n const Tensor* update_signal = ctx->Input(3);\n if (update_signal == nullptr || *update_signal->template Data()) {\n typedef typename ToCudaType::MappedType CudaT_FLOAT;\n typedef SGDMTAFunctor TFunctor;\n TFunctor functor;\n\n const float* lr_ptr = p.learning_rate->template Data();\n ORT_ENFORCE(lr_ptr);\n\n launch_multi_tensor_functor(\n Stream(ctx), MTA_SGD_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor,\n *lr_ptr);\n *updated_flag_ptr = true;\n } else {\n *updated_flag_ptr = false;\n }\n\n if (p.updated_weights != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));\n }\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/optimizer/common.h\"\n#include \"orttraining/training_ops/rocm/optimizer/sgd/sgd.h\"\n#include \"orttraining/training_ops/rocm/optimizer/sgd/sgd_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n SGDOptimizerV2,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 3)\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .Alias(1, 1) // Update weights in-place\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T_BOOL\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"S_WEIGHT\", DataTypeImpl::AllFixedSizeSequenceTensorTypes())\n .TypeConstraint(\"S_GRAD\", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),\n SGDOptimizerV2);\n\nStatus SGDOptimizerV2::ComputeInternal(OpKernelContext* ctx) const {\n SGDOptimizerV2Base::Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));\n\n bool* updated_flag_ptr = p.update_completed->template MutableData();\n\n // Currently placed on CPU, need revisit when we had mixed precision training requirement.\n const Tensor* update_signal = ctx->Input(3);\n if (update_signal == nullptr || *update_signal->template Data()) {\n typedef typename ToHipType::MappedType HipT_FLOAT;\n typedef SGDMTAFunctor TFunctor;\n TFunctor functor;\n\n const float* lr_ptr = p.learning_rate->template Data();\n ORT_ENFORCE(lr_ptr);\n\n launch_multi_tensor_functor(\n Stream(ctx), MTA_SGD_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor,\n *lr_ptr);\n *updated_flag_ptr = true;\n } else {\n *updated_flag_ptr = false;\n }\n\n if (p.updated_weights != nullptr) {\n ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));\n }\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"orttraining/training_ops/cpu/optimizer/sgd/sgdbase.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SGDOptimizerV2 final : public CudaKernel, public contrib::SGDOptimizerV2Base {\n public:\n SGDOptimizerV2(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"orttraining/training_ops/cpu/optimizer/sgd/sgdbase.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SGDOptimizerV2 final : public RocmKernel, public contrib::SGDOptimizerV2Base {\n public:\n SGDOptimizerV2(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/optimizer/sgd/sgd_impl.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void SGDCompute(ChunkGroup chunks, const float lr) {\n const int block_idx = blockIdx.x;\n T_WEIGHT* weight_chunk_ptr;\n T_GRAD* grad_chunk_ptr;\n int chunk_size;\n const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];\n const int tensor_size = chunks.tensor_sizes[tensor_idx];\n T_WEIGHT* weight_tensor_ptr = static_cast(chunks.tensor_ptrs[0][tensor_idx]);\n T_GRAD* grad_tensor_ptr = static_cast(chunks.tensor_ptrs[1][tensor_idx]);\n const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];\n // chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this\n // chunk is the last one in the source tensor), the actual size is determined\n // by the bound of the source tensor.\n chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;\n\n weight_chunk_ptr = weight_tensor_ptr + chunk_start_idx;\n grad_chunk_ptr = grad_tensor_ptr + chunk_start_idx;\n\n#pragma unroll 4\n for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {\n float w = static_cast(weight_chunk_ptr[i]);\n float g = static_cast(grad_chunk_ptr[i]);\n w = w + -lr * g;\n // Update the new weight.\n weight_chunk_ptr[i] = static_cast(w);\n }\n}\n\ntemplate \nvoid SGDMTAFunctor::operator()(cudaStream_t stream,\n ChunkGroup chunks,\n const float lr) {\n const int block_count = chunks.chunk_count;\n const int thread_count = ChunkGroup::thread_count_per_block;\n SGDCompute<<>>(chunks, lr);\n}\n\n#define INSTANTIATE_SGD_FUNCTOR(T_WEIGHT, T_GRAD) \\\n template void SGDMTAFunctor::operator()(cudaStream_t stream, \\\n ChunkGroup chunks, \\\n const float lr); \\\n template __global__ void SGDCompute(ChunkGroup chunks, \\\n const float lr);\n\nINSTANTIATE_SGD_FUNCTOR(float, float)\n\n#undef INSTANTIATE_SGD_FUNCTOR\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/optimizer/sgd/sgd_impl.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void SGDCompute(ChunkGroup chunks, const float lr) {\n const int block_idx = blockIdx.x;\n T_WEIGHT* weight_chunk_ptr;\n T_GRAD* grad_chunk_ptr;\n int chunk_size;\n const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];\n const int tensor_size = chunks.tensor_sizes[tensor_idx];\n T_WEIGHT* weight_tensor_ptr = static_cast(chunks.tensor_ptrs[0][tensor_idx]);\n T_GRAD* grad_tensor_ptr = static_cast(chunks.tensor_ptrs[1][tensor_idx]);\n const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];\n // chunk_size is chunks.chunk_size if the loaded chunk is full. 
Otherwise (this\n // chunk is the last one in the source tensor), the actual size is determined\n // by the bound of the source tensor.\n chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;\n\n weight_chunk_ptr = weight_tensor_ptr + chunk_start_idx;\n grad_chunk_ptr = grad_tensor_ptr + chunk_start_idx;\n\n#pragma unroll 4\n for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {\n float w = static_cast(weight_chunk_ptr[i]);\n float g = static_cast(grad_chunk_ptr[i]);\n w = w + -lr * g;\n // Update the new weight.\n weight_chunk_ptr[i] = static_cast(w);\n }\n}\n\ntemplate \nvoid SGDMTAFunctor::operator()(hipStream_t stream,\n ChunkGroup chunks,\n const float lr) {\n const int block_count = chunks.chunk_count;\n const int thread_count = ChunkGroup::thread_count_per_block;\n SGDCompute<<>>(chunks, lr);\n}\n\n#define INSTANTIATE_SGD_FUNCTOR(T_WEIGHT, T_GRAD) \\\n template void SGDMTAFunctor::operator()(hipStream_t stream, \\\n ChunkGroup chunks, \\\n const float lr); \\\n template __global__ void SGDCompute(ChunkGroup chunks, \\\n const float lr);\n\nINSTANTIATE_SGD_FUNCTOR(float, float)\n\n#undef INSTANTIATE_SGD_FUNCTOR\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#define MTA_SGD_GROUP_SIZE 2\n#define MTA_SGD_CHUNK_SIZE 2048 * 32\n\ntemplate \nstruct SGDMTAFunctor {\n void operator()(cudaStream_t stream, ChunkGroup chunks, const float lr);\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#define MTA_SGD_GROUP_SIZE 2\n#define MTA_SGD_CHUNK_SIZE 2048 * 32\n\ntemplate \nstruct SGDMTAFunctor {\n void operator()(hipStream_t stream, ChunkGroup chunks, const float lr);\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nclass MatMulInteger final : public CudaKernel {\n using Base = CudaKernel;\n\n public:\n MatMulInteger(const OpKernelInfo& info) : CudaKernel(info) {\n has_a_zero_point_ = false;\n has_b_zero_point_ = false;\n if (info.GetInputCount() > 2) {\n has_a_zero_point_ = true;\n }\n if (info.GetInputCount() > 3) {\n has_b_zero_point_ = true;\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool has_a_zero_point_;\n bool has_b_zero_point_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nclass MatMulInteger final : public RocmKernel {\n using Base = RocmKernel;\n\n public:\n MatMulInteger(const OpKernelInfo& info) : RocmKernel(info) {\n has_a_zero_point_ = false;\n has_b_zero_point_ = false;\n if (info.GetInputCount() > 2) {\n has_a_zero_point_ = true;\n }\n if (info.GetInputCount() > 3) {\n has_b_zero_point_ = true;\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool has_a_zero_point_;\n bool has_b_zero_point_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass FakeQuant final : public CudaKernel {\n public:\n FakeQuant(const OpKernelInfo& info) : CudaKernel(info) {\n info.GetAttrOrDefault(\"quant_min\", &quant_min_, static_cast(0));\n info.GetAttrOrDefault(\"quant_max\", &quant_max_, static_cast(255));\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t quant_min_;\n int64_t quant_max_;\n};\n\ntemplate \nclass FakeQuantGrad final : public CudaKernel {\n public:\n FakeQuantGrad(const OpKernelInfo& info) : CudaKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass FakeQuant final : public RocmKernel {\n public:\n FakeQuant(const OpKernelInfo& info) : RocmKernel(info) {\n info.GetAttrOrDefault(\"quant_min\", &quant_min_, static_cast(0));\n info.GetAttrOrDefault(\"quant_max\", &quant_max_, static_cast(255));\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t quant_min_;\n int64_t quant_max_;\n};\n\ntemplate \nclass FakeQuantGrad final : public RocmKernel {\n public:\n FakeQuantGrad(const OpKernelInfo& info) : RocmKernel(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid FakeQuantPerTensor(cudaStream_t stream, const int64_t num_elements, const T* input_data, const T quant_scale,\n const T quant_zero_point, const int64_t quant_min, const int64_t quant_max,\n T* fake_quantized_data, bool* quantization_mask_data);\n\ntemplate \nvoid FakeQuantGradImpl(cudaStream_t stream, const int64_t num_elements, const T* dY_data,\n const bool* gradient_mask_data, T* dX_data);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid FakeQuantPerTensor(hipStream_t stream, const int64_t num_elements, const T* input_data, const T quant_scale,\n const T quant_zero_point, const int64_t quant_min, const int64_t quant_max,\n T* fake_quantized_data, bool* quantization_mask_data);\n\ntemplate \nvoid FakeQuantGradImpl(hipStream_t stream, const int64_t num_elements, const T* dY_data,\n const bool* gradient_mask_data, T* dX_data);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/reduction/all.h\"\n#include \"orttraining/training_ops/cuda/reduction/all_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#define REGISTER_ALL_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n All, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n All);\n\ntemplate \nStatus All::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor& input = *ctx->Input(0);\n Tensor& output = *ctx->Output(0, {1});\n\n const auto size = input.Shape().Size();\n ORT_ENFORCE(size <= std::numeric_limits::max(), \"Number of reduced elements (\",\n size, \") exceeds the max allowed value (\", std::numeric_limits::max(), \").\");\n\n LaunchAllKernel(\n Stream(ctx),\n input.Data(),\n static_cast(size),\n output.MutableData());\n return Status::OK();\n}\n\nREGISTER_ALL_KERNEL_TYPED(bool)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/reduction/all.h\"\n#include \"orttraining/training_ops/rocm/reduction/all_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#define REGISTER_ALL_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n All, \\\n kMSDomain, \\\n 1, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n All);\n\ntemplate \nStatus All::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor& input = *ctx->Input(0);\n Tensor& output = *ctx->Output(0, {1});\n\n const auto size = input.Shape().Size();\n ORT_ENFORCE(size <= std::numeric_limits::max(), \"Number of reduced elements (\",\n size, \") exceeds the max allowed value (\", std::numeric_limits::max(), \").\");\n\n LaunchAllKernel(\n Stream(ctx),\n input.Data(),\n static_cast(size),\n output.MutableData());\n return Status::OK();\n}\n\nREGISTER_ALL_KERNEL_TYPED(bool)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass All final : public CudaKernel {\n public:\n All(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass All final : public RocmKernel {\n public:\n All(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/reduction/all_impl.h\"\n\n#include \n#include \n#include \n\n#ifdef _WIN32\n#pragma warning(disable : 4244)\n#endif\nnamespace onnxruntime {\nnamespace cuda {\n\n__global__ void assign_true(bool* ptr) {\n *ptr = true;\n}\n\n__global__ void assign_false(bool* ptr) {\n *ptr = false;\n}\n\ntemplate<>\nvoid LaunchAllKernel(cudaStream_t stream, const bool* data, const int size, bool* output) {\n if(thrust::all_of(thrust::cuda::par.on(stream), data, data + size, thrust::identity())) {\n assign_true<<<1, 1, 0, stream>>>(output);\n }\n else\n {\n assign_false<<<1, 1, 0, stream>>>(output);\n }\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/reduction/all_impl.h\"\n\n#include \n#include \n#include \n\n#ifdef _WIN32\n#pragma warning(disable : 4244)\n#endif\nnamespace onnxruntime {\nnamespace rocm {\n\n__global__ void assign_true(bool* ptr) {\n *ptr = true;\n}\n\n__global__ void assign_false(bool* ptr) {\n *ptr = false;\n}\n\ntemplate<>\nvoid LaunchAllKernel(hipStream_t stream, const bool* data, const int size, bool* output) {\n if(thrust::all_of(thrust::hip::par.on(stream), data, data + size, thrust::identity())) {\n assign_true<<<1, 1, 0, stream>>>(output);\n }\n else\n {\n assign_false<<<1, 1, 0, stream>>>(output);\n }\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nvoid LaunchAllKernel(cudaStream_t stream, const T* data, const int size, bool* output);\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nvoid LaunchAllKernel(hipStream_t stream, const T* data, const int size, bool* output);\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass ReduceAllL2 final : public CudaKernel {\n public:\n ReduceAllL2(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass ReduceAllL2 final : public RocmKernel {\n public:\n ReduceAllL2(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nstruct MultiTensorReduceL2 {\n void operator()(cudaStream_t stream, ChunkGroup<1> chunk_group, TOut* output);\n};\n\ntemplate \nvoid ScalarSqrt(cudaStream_t stream, Tin* input, Tout* output);\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/multi_tensor/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nstruct MultiTensorReduceL2 {\n void operator()(hipStream_t stream, ChunkGroup<1> chunk_group, TOut* output);\n};\n\ntemplate \nvoid ScalarSqrt(hipStream_t stream, Tin* input, Tout* output);\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/optional.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/reduction/reduction_ops.h\"\n#include \"core/providers/cuda/reduction/reduction_functions.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass ReduceSumTraining final : public ReduceKernel {\n public:\n ReduceSumTraining(const OpKernelInfo& info) : ReduceKernel(info) {\n fast_reduction_ = true;\n }\n\n Status ComputeInternal(OpKernelContext* ctx) const override {\n return ComputeImplEx(ctx, CUDNN_REDUCE_TENSOR_ADD);\n }\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/common/optional.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/reduction/reduction_ops.h\"\n#include \"core/providers/rocm/reduction/reduction_functions.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass ReduceSumTraining final : public ReduceKernel {\n public:\n ReduceSumTraining(const OpKernelInfo& info) : ReduceKernel(info) {\n fast_reduction_ = true;\n }\n\n Status ComputeInternal(OpKernelContext* ctx) const override {\n return ComputeImplEx(ctx, MIOPEN_REDUCE_TENSOR_ADD);\n }\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/concatbase.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass ConcatTraining final : public CudaKernel, public ConcatBase {\n public:\n ConcatTraining(const OpKernelInfo& info) : CudaKernel(info), ConcatBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/concatbase.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass ConcatTraining final : public RocmKernel, public ConcatBase {\n public:\n ConcatTraining(const OpKernelInfo& info) : RocmKernel(info), ConcatBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass TopK final : public CudaKernel {\n public:\n TopK(const OpKernelInfo&);\n Status ComputeInternal(OpKernelContext*) const override;\n\n private:\n int64_t axis_;\n int64_t largest_;\n int64_t sorted_;\n mutable int64_t K_;\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass TopK final : public RocmKernel {\n public:\n TopK(const OpKernelInfo&);\n Status ComputeInternal(OpKernelContext*) const override;\n\n private:\n int64_t axis_;\n int64_t largest_;\n int64_t sorted_;\n mutable int64_t K_;\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass GatherElementsGrad final : public CudaKernel {\n public:\n GatherElementsGrad(const OpKernelInfo& info) : CudaKernel(info) {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(0));\n }\n ~GatherElementsGrad() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n\n int64_t axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass GatherElementsGrad final : public RocmKernel {\n public:\n GatherElementsGrad(const OpKernelInfo& info) : RocmKernel(info) {\n info.GetAttrOrDefault(\"axis\", &axis_, static_cast(0));\n }\n ~GatherElementsGrad() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n\n int64_t axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct GatherScatterElementsArgs;\n\ntemplate \nStatus GatherElementsGradImpl(cudaStream_t stream, const TIndex* indices_data, const T* updates_data, T* output_data,\n const GatherScatterElementsArgs& args);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct GatherScatterElementsArgs;\n\ntemplate \nStatus GatherElementsGradImpl(hipStream_t stream, const TIndex* indices_data, const T* updates_data, T* output_data,\n const GatherScatterElementsArgs& args);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass GatherGrad final : public CudaKernel {\n public:\n GatherGrad(const OpKernelInfo& info) : CudaKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK(), \"Missing/Invalid 'axis' attribute value\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass GatherGrad final : public RocmKernel {\n public:\n GatherGrad(const OpKernelInfo& info) : RocmKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK(), \"Missing/Invalid 'axis' attribute value\");\n }\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/framework/stream_handles.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass CudaScratchBufferAllocator {\n public:\n explicit CudaScratchBufferAllocator(const CudaKernel& kernel, Stream* stream) : kernel_{kernel}, stream_{stream} {\n }\n\n template \n IAllocatorUniquePtr GetScratchBuffer(size_t count_or_bytes) const {\n return kernel_.GetScratchBuffer(count_or_bytes, stream_);\n }\n\n private:\n const CudaKernel& kernel_;\n Stream* stream_;\n};\n\n// unit for handling indexing and counting of gathered indices\nusing GatheredIndexIndex_t = int32_t;\n\ntemplate \nvoid GatherGradImpl(\n cudaStream_t stream,\n const cudaDeviceProp& prop,\n const CudaScratchBufferAllocator& allocator,\n const T* dY_data,\n const TIndex* dX_indices,\n const GatheredIndexIndex_t num_gathered_indices,\n const int64_t gather_dimension_size,\n const int64_t num_gathered_per_index,\n const int64_t num_batches,\n T* dX_data);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/framework/stream_handles.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass CudaScratchBufferAllocator {\n public:\n explicit CudaScratchBufferAllocator(const RocmKernel& kernel, Stream* stream) : kernel_{kernel}, stream_{stream} {\n }\n\n template \n IAllocatorUniquePtr GetScratchBuffer(size_t count_or_bytes) const {\n return kernel_.GetScratchBuffer(count_or_bytes, stream_);\n }\n\n private:\n const RocmKernel& kernel_;\n Stream* stream_;\n};\n\n// unit for handling indexing and counting of gathered indices\nusing GatheredIndexIndex_t = int32_t;\n\ntemplate \nvoid GatherGradImpl(\n hipStream_t stream,\n const hipDeviceProp_t& prop,\n const CudaScratchBufferAllocator& allocator,\n const T* dY_data,\n const TIndex* dX_indices,\n const GatheredIndexIndex_t num_gathered_indices,\n const int64_t gather_dimension_size,\n const int64_t num_gathered_per_index,\n const int64_t num_batches,\n T* dX_data);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/tensor/gather_nd.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass GatherNDGrad final : public GatherNDBase {\n public:\n GatherNDGrad(const OpKernelInfo& info) : GatherNDBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/tensor/gather_nd.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass GatherNDGrad final : public GatherNDBase {\n public:\n GatherNDGrad(const OpKernelInfo& info) : GatherNDBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#ifdef _WIN32\n#pragma warning(disable : 4244)\n#endif\n#include \"orttraining/training_ops/cuda/tensor/gather_nd_grad_impl.h\"\n\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"core/providers/cuda/atomic/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _GatherNDGradKernel(\n const size_t num_slices,\n const T* update_data,\n T* output_data,\n const size_t slice_size,\n const int64_t* slice_offsets) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size);\n uint64_t slice_offset = slice_offsets[i / slice_size];\n size_t j = i % slice_size;\n atomic_add(output_data + slice_offset + j, update_data[i]);\n};\n\ntemplate \nvoid GatherNDGradImpl(\n cudaStream_t stream,\n const size_t num_slices,\n const void* update_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data) {\n const unsigned int blocks_per_grid = static_cast(CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock));\n _GatherNDGradKernel<<>>(\n num_slices, static_cast(update_data), static_cast(output_data), slice_size, input_slice_offsets_data);\n}\n\n#define SPECIALIZED_GRAD_IMPL(T) \\\n template void GatherNDGradImpl(cudaStream_t stream, const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data)\n\nSPECIALIZED_GRAD_IMPL(float);\n#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600\nSPECIALIZED_GRAD_IMPL(half);\nSPECIALIZED_GRAD_IMPL(double);\nSPECIALIZED_GRAD_IMPL(BFloat16);\n#endif\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n#ifdef _WIN32\n#pragma warning(disable : 4244)\n#endif\n#include \"orttraining/training_ops/rocm/tensor/gather_nd_grad_impl.h\"\n\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"core/providers/rocm/atomic/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _GatherNDGradKernel(\n const size_t num_slices,\n const T* update_data,\n T* output_data,\n const size_t slice_size,\n const int64_t* slice_offsets) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, num_slices * slice_size);\n uint64_t slice_offset = slice_offsets[i / slice_size];\n size_t j = i % slice_size;\n atomic_add(output_data + slice_offset + j, update_data[i]);\n};\n\ntemplate \nvoid GatherNDGradImpl(\n hipStream_t stream,\n const size_t num_slices,\n const void* update_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data) {\n const unsigned int blocks_per_grid = static_cast(CeilDiv(num_slices * slice_size, GridDim::maxThreadsPerBlock));\n _GatherNDGradKernel<<>>(\n num_slices, static_cast(update_data), static_cast(output_data), slice_size, input_slice_offsets_data);\n}\n\n#define SPECIALIZED_GRAD_IMPL(T) \\\n template void GatherNDGradImpl(hipStream_t stream, const size_t num_slices, const void* update_data, void* output_data, const size_t slice_size, const int64_t* input_slice_offsets_data)\n\nSPECIALIZED_GRAD_IMPL(float);\n#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600\nSPECIALIZED_GRAD_IMPL(half);\nSPECIALIZED_GRAD_IMPL(double);\nSPECIALIZED_GRAD_IMPL(BFloat16);\n#endif\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid GatherNDGradImpl(\n cudaStream_t stream,\n const size_t num_slices,\n const void* update_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid GatherNDGradImpl(\n hipStream_t stream,\n const size_t num_slices,\n const void* update_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/cuda/tensor/slice_grad.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n#include \"core/providers/cuda/tensor/slice_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n SliceGrad,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .InputMemoryType(OrtMemTypeCPUInput, 2)\n .InputMemoryType(OrtMemTypeCPUInput, 3)\n .InputMemoryType(OrtMemTypeCPUInput, 4)\n .InputMemoryType(OrtMemTypeCPUInput, 5)\n .TypeConstraint(\"I\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SliceGrad);\n\nTensor* GetOutputGradientTensor(OpKernelContext* ctx) {\n const Tensor& shape = *ctx->Input(1);\n const TensorShape data_shape(shape.template Data(), shape.Shape().Size());\n return ctx->Output(0, data_shape);\n}\n\nconst Tensor* SliceGrad::GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const {\n // The gradient computation logic is same as slice op except the assignment from input tensor to output tensor is\n // reversed, hence, the input tensor for slice op code (when used for gradient computation) would be the output\n // tensor for gradient op that will have the same shape as the input tensor for slice op when used for slicing and\n // not gradient computation.\n return GetOutputGradientTensor(ctx);\n}\n\nStatus SliceGrad::FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts,\n TensorShapeVector& input_ends, TensorShapeVector& input_axes,\n TensorShapeVector& input_steps) const {\n return FillVectorsFromInput(*ctx->Input(2), *ctx->Input(3), ctx->Input(4),\n ctx->Input(5), input_starts, input_ends, input_axes, input_steps);\n}\n\nStatus SliceGrad::CallSliceImp(size_t element_size, size_t dimension_count, const TArray& starts_buffer,\n const TArray& steps_buffer, const TArray& input_strides,\n const TArray& output_strides, OpKernelContext* ctx,\n const TensorShape& output_shape) const {\n Tensor* gradient_out_tensor = GetOutputGradientTensor(ctx);\n CUDA_RETURN_IF_ERROR(cudaMemsetAsync(gradient_out_tensor->MutableDataRaw(), 0, gradient_out_tensor->SizeInBytes(), Stream(ctx)));\n return SliceImplGrad(Stream(ctx),\n element_size,\n gsl::narrow_cast(dimension_count),\n starts_buffer,\n steps_buffer,\n input_strides,\n output_strides,\n ctx->Input(0)->DataRaw(),\n gradient_out_tensor->MutableDataRaw(),\n output_shape.Size());\n}\n\n} // 
namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"orttraining/training_ops/rocm/tensor/slice_grad.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\n#include \"core/providers/rocm/tensor/slice_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n SliceGrad,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 1)\n .InputMemoryType(OrtMemTypeCPUInput, 2)\n .InputMemoryType(OrtMemTypeCPUInput, 3)\n .InputMemoryType(OrtMemTypeCPUInput, 4)\n .InputMemoryType(OrtMemTypeCPUInput, 5)\n .TypeConstraint(\"I\", DataTypeImpl::GetTensorType())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()}),\n SliceGrad);\n\nTensor* GetOutputGradientTensor(OpKernelContext* ctx) {\n const Tensor& shape = *ctx->Input(1);\n const TensorShape data_shape(shape.template Data(), shape.Shape().Size());\n return ctx->Output(0, data_shape);\n}\n\nconst Tensor* SliceGrad::GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const {\n // The gradient computation logic is same as slice op except the assignment from input tensor to output tensor is\n // reversed, hence, the input tensor for slice op code (when used for gradient computation) would be the output\n // tensor for gradient op that will have the same shape as the input tensor for slice op when used for slicing and\n // not gradient computation.\n return GetOutputGradientTensor(ctx);\n}\n\nStatus SliceGrad::FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts,\n TensorShapeVector& input_ends, TensorShapeVector& input_axes,\n TensorShapeVector& input_steps) const {\n return FillVectorsFromInput(*ctx->Input(2), *ctx->Input(3), ctx->Input(4),\n ctx->Input(5), input_starts, input_ends, input_axes, input_steps);\n}\n\nStatus SliceGrad::CallSliceImp(size_t element_size, size_t dimension_count, const TArray& starts_buffer,\n const TArray& steps_buffer, const TArray& input_strides,\n const TArray& output_strides, OpKernelContext* ctx,\n const TensorShape& output_shape) const {\n Tensor* gradient_out_tensor = GetOutputGradientTensor(ctx);\n HIP_RETURN_IF_ERROR(hipMemsetAsync(gradient_out_tensor->MutableDataRaw(), 0, gradient_out_tensor->SizeInBytes(), Stream(ctx)));\n return SliceImplGrad(Stream(ctx),\n element_size,\n gsl::narrow_cast(dimension_count),\n starts_buffer,\n steps_buffer,\n input_strides,\n output_strides,\n ctx->Input(0)->DataRaw(),\n gradient_out_tensor->MutableDataRaw(),\n output_shape.Size());\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/tensor/slice.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SliceGrad final : public Slice {\n public:\n SliceGrad(const OpKernelInfo& info) : Slice(info) {}\n\n private:\n const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const override;\n Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts, TensorShapeVector& input_ends,\n TensorShapeVector& input_axes, TensorShapeVector& input_steps) const override;\n\n Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray& starts_buffer,\n const TArray& steps_buffer, const TArray& input_strides,\n const TArray& output_strides, OpKernelContext* ctx, const TensorShape& output_shape)\n const override;\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/tensor/slice.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SliceGrad final : public Slice {\n public:\n SliceGrad(const OpKernelInfo& info) : Slice(info) {}\n\n private:\n const Tensor* GetSlicedOrUnslicedTensor(OpKernelContext* ctx) const override;\n Status FillInputVectors(OpKernelContext* ctx, TensorShapeVector& input_starts, TensorShapeVector& input_ends,\n TensorShapeVector& input_axes, TensorShapeVector& input_steps) const override;\n\n Status CallSliceImp(size_t element_size, size_t dimension_count, const TArray& starts_buffer,\n const TArray& steps_buffer, const TArray& input_strides,\n const TArray& output_strides, OpKernelContext* ctx, const TensorShape& output_shape)\n const override;\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/split.h\"\n#include \"orttraining/training_ops/cpu/tensor/split.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass SplitTraining final : public CudaKernel, public SplitBase {\n public:\n // ONNX Split from opset 13. no support for uneven splits that was added in opset 18.\n SplitTraining(const OpKernelInfo& info) : CudaKernel(info), SplitBase(info, 13) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/split.h\"\n#include \"orttraining/training_ops/cpu/tensor/split.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass SplitTraining final : public RocmKernel, public SplitBase {\n public:\n // ONNX Split from opset 13. no support for uneven splits that was added in opset 18.\n SplitTraining(const OpKernelInfo& info) : RocmKernel(info), SplitBase(info, 13) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus TopKImpl(const CudaKernel* kernel, Stream* ort_stream, const T* input_x, T* output_v, int64_t* output_i, const TArray& elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus TopKImpl(const RocmKernel* kernel, Stream* ort_stream, const T* input_x, T* output_v, int64_t* output_i, const TArray& elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"orttraining/training_ops/cuda/tensor/view.h\"\nnamespace onnxruntime {\nnamespace cuda {\nnamespace {\nconstexpr int view_count_limit = 1024; \nstd::vector> GenerateAliasMapping() {\n std::vector> alias_pairs{};\n for (int i = 0; i < view_count_limit; ++i) {\n alias_pairs.emplace_back(std::make_pair(0, i));\n }\n return alias_pairs;\n}\nstd::vector GenerateInputMemoryType() {\n std::vector input_indexes{};\n for (int i = 1; i < 1 + view_count_limit; ++i) {\n input_indexes.emplace_back(i);\n }\n return input_indexes;\n}\n} \nONNX_OPERATOR_KERNEL_EX(\n View, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shapes\", DataTypeImpl::GetTensorType())\n .InputMemoryType(OrtMemTypeCPUInput, GenerateInputMemoryType()) \n .Alias(GenerateAliasMapping()), View);\nStatus View::ComputeInternal(OpKernelContext* context) const {\n const Tensor* X = context->Input(0);\n size_t bytes_per_elem = X->DataType()->Size();\n int view_count = context->InputCount() - 1;\n std::vector y_shapes(view_count);\n std::vector y_byte_offsets(view_count);\n size_t byte_offset = 0;\n for (int i = 0; i < view_count; ++i) {\n const Tensor* shape_tensor = context->Input(i + 1);\n if (shape_tensor->Shape().NumDimensions() != 1) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"A shape tensor must be a vector tensor, got \", shape_tensor->Shape().NumDimensions(), \" dimensions\");\n }\n size_t n_dims = static_cast(shape_tensor->Shape()[0]);\n const int64_t* shape_data = shape_tensor->template Data();\n y_shapes[i] = TensorShape(shape_data, n_dims);\n y_byte_offsets[i] = byte_offset;\n byte_offset += y_shapes[i].Size() * bytes_per_elem;\n }\n if (byte_offset != X->SizeInBytes()) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"The input view shapes doesn't adds up to match input buffer size.\");\n }\n const void* X_data = X->DataRaw();\n for (int i = 0; i < view_count; ++i) {\n \n Tensor* Y = context->Output(i, y_shapes[i]);\n if (Y != nullptr) {\n if (X_data != Y->MutableDataRaw()) {\n \n const void* source = static_cast(X_data) + y_byte_offsets[i];\n void* target = Y->MutableDataRaw();\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, Y->SizeInBytes(), cudaMemcpyDeviceToDevice, Stream(context)));\n } 
else {\n Y->SetByteOffset(y_byte_offsets[i]);\n }\n }\n }\n return Status::OK();\n}\n} \n} \n\n###", "hip": " \n\n#include \"orttraining/training_ops/rocm/tensor/view.h\"\nnamespace onnxruntime {\nnamespace rocm {\nnamespace {\nconstexpr int view_count_limit = 1024; \nstd::vector> GenerateAliasMapping() {\n std::vector> alias_pairs{};\n for (int i = 0; i < view_count_limit; ++i) {\n alias_pairs.emplace_back(std::make_pair(0, i));\n }\n return alias_pairs;\n}\nstd::vector GenerateInputMemoryType() {\n std::vector input_indexes{};\n for (int i = 1; i < 1 + view_count_limit; ++i) {\n input_indexes.emplace_back(i);\n }\n return input_indexes;\n}\n} \nONNX_OPERATOR_KERNEL_EX(\n View, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shapes\", DataTypeImpl::GetTensorType())\n .InputMemoryType(OrtMemTypeCPUInput, GenerateInputMemoryType()) \n .Alias(GenerateAliasMapping()), View);\nStatus View::ComputeInternal(OpKernelContext* context) const {\n const Tensor* X = context->Input(0);\n size_t bytes_per_elem = X->DataType()->Size();\n int view_count = context->InputCount() - 1;\n std::vector y_shapes(view_count);\n std::vector y_byte_offsets(view_count);\n size_t byte_offset = 0;\n for (int i = 0; i < view_count; ++i) {\n const Tensor* shape_tensor = context->Input(i + 1);\n if (shape_tensor->Shape().NumDimensions() != 1) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"A shape tensor must be a vector tensor, got \", shape_tensor->Shape().NumDimensions(), \" dimensions\");\n }\n size_t n_dims = static_cast(shape_tensor->Shape()[0]);\n const int64_t* shape_data = shape_tensor->template Data();\n y_shapes[i] = TensorShape(shape_data, n_dims);\n y_byte_offsets[i] = byte_offset;\n byte_offset += y_shapes[i].Size() * bytes_per_elem;\n }\n if (byte_offset != X->SizeInBytes()) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"The input view shapes doesn't adds up to match input buffer size.\");\n }\n const void* X_data = X->DataRaw();\n for (int i = 0; i < view_count; ++i) {\n \n Tensor* Y = context->Output(i, y_shapes[i]);\n if (Y != nullptr) {\n if (X_data != Y->MutableDataRaw()) {\n \n const void* source = static_cast(X_data) + y_byte_offsets[i];\n void* target = Y->MutableDataRaw();\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, Y->SizeInBytes(), hipMemcpyDeviceToDevice, Stream(context)));\n } else {\n Y->SetByteOffset(y_byte_offsets[i]);\n }\n }\n }\n return Status::OK();\n}\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass View final : public CudaKernel {\n public:\n View(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass View final : public RocmKernel {\n public:\n View(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_TORCH_INTEROP\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/core/framework/torch/refcount_tracker.h\"\n#include \"orttraining/training_ops/cuda/torch/torch_custom_function_kernel.h\"\n#include \"core/framework/ort_value.h\"\n\nusing namespace onnxruntime::language_interop_ops::torch;\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n PythonOp,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorAndSequenceTensorTypes())\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType()),\n PythonOp);\n\nONNX_OPERATOR_KERNEL_EX(\n PythonOpGrad,\n kMSDomain,\n 1,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorAndSequenceTensorTypes())\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType()),\n PythonOpGrad);\n\nStatus PythonOp::ComputeInternal(OpKernelContext* context) const {\n void* diff_ctx = nullptr;\n std::vector returned_ortvalues;\n RunForward(context, &diff_ctx, returned_ortvalues);\n\n SetOutputs(context, diff_ctx, returned_ortvalues);\n\n RefCountTracker::GetInstance().DumpDetails(\"Forward Kernel Completed\");\n return Status::OK();\n}\n\nStatus PythonOpGrad::ComputeInternal(OpKernelContext* context) const {\n std::vector returned_ortvalues;\n RunBackward(context, returned_ortvalues);\n\n SetOutputs(context, returned_ortvalues);\n\n RefCountTracker::GetInstance().DumpDetails(\"Backward Kernel Completed\");\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n#endif\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_TORCH_INTEROP\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"orttraining/core/framework/torch/refcount_tracker.h\"\n#include \"orttraining/training_ops/rocm/torch/torch_custom_function_kernel.h\"\n#include \"core/framework/ort_value.h\"\n\nusing namespace onnxruntime::language_interop_ops::torch;\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n PythonOp,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .OutputMemoryType(OrtMemTypeCPUOutput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorAndSequenceTensorTypes())\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType()),\n PythonOp);\n\nONNX_OPERATOR_KERNEL_EX(\n PythonOpGrad,\n kMSDomain,\n 1,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .InputMemoryType(OrtMemTypeCPUInput, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllTensorAndSequenceTensorTypes())\n .TypeConstraint(\"TInt64\", DataTypeImpl::GetTensorType()),\n PythonOpGrad);\n\nStatus PythonOp::ComputeInternal(OpKernelContext* context) const {\n void* diff_ctx = nullptr;\n std::vector returned_ortvalues;\n RunForward(context, &diff_ctx, returned_ortvalues);\n\n SetOutputs(context, diff_ctx, returned_ortvalues);\n\n RefCountTracker::GetInstance().DumpDetails(\"Forward Kernel Completed\");\n return Status::OK();\n}\n\nStatus PythonOpGrad::ComputeInternal(OpKernelContext* context) const {\n std::vector returned_ortvalues;\n RunBackward(context, returned_ortvalues);\n\n SetOutputs(context, returned_ortvalues);\n\n RefCountTracker::GetInstance().DumpDetails(\"Backward Kernel Completed\");\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n\n#endif###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_TORCH_INTEROP\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"orttraining/training_ops/cpu/torch/torch_custom_function_kernel_base.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n// Pytorch's torch.autograd.Function.apply(...) wrapper.\nclass PythonOp final : public contrib::PythonOpBase, public CudaKernel {\n public:\n PythonOp(const OpKernelInfo& info) : contrib::PythonOpBase(info), CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n// Pytorch's torch.autograd.Function.backward(...) wrapper.\nclass PythonOpGrad final : public contrib::PythonOpGradBase, public CudaKernel {\n public:\n PythonOpGrad(const OpKernelInfo& info) : contrib::PythonOpGradBase(info), CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n#endif\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_TRAINING_TORCH_INTEROP\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"orttraining/training_ops/cpu/torch/torch_custom_function_kernel_base.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n// Pytorch's torch.autograd.Function.apply(...) wrapper.\nclass PythonOp final : public contrib::PythonOpBase, public RocmKernel {\n public:\n PythonOp(const OpKernelInfo& info) : contrib::PythonOpBase(info), RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n// Pytorch's torch.autograd.Function.backward(...) 
wrapper.\nclass PythonOpGrad final : public contrib::PythonOpGradBase, public RocmKernel {\n public:\n PythonOpGrad(const OpKernelInfo& info) : contrib::PythonOpGradBase(info), RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n\n#endif###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE MLFloat16\n#include \"topk_impl.cuh\"\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE MLFloat16\n#include \"topk_impl.cuh\"\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE float\n#include \"topk_impl.cuh\"\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE float\n#include \"topk_impl.cuh\"\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#ifdef USE_ROCM\nconstexpr int kElementsPerThread = 2;\nconstexpr int kThreadsPerBlock = 512;\n#else\nconstexpr int kElementsPerThread = GridDim::maxElementsPerThread;\nconstexpr int kThreadsPerBlock = GridDim::maxThreadsPerBlock;\n#endif\n\ntemplate \n__global__ void ElementwiseKernel(T* output_data, const FuncT functor, CUDA_LONG N) {\n CUDA_LONG start = kElementsPerThread * kThreadsPerBlock * blockIdx.x + threadIdx.x;\n T value[kElementsPerThread];\n\n CUDA_LONG id = start;\n#pragma unroll\n for (int i = 0; i < kElementsPerThread; ++i) {\n if (id < N) {\n value[i] = functor(id);\n id += kThreadsPerBlock;\n }\n }\n\n id = start;\n#pragma unroll\n for (int i = 0; i < kElementsPerThread; ++i) {\n if (id < N) {\n output_data[id] = value[i];\n id += kThreadsPerBlock;\n }\n }\n}\n\ntemplate \nvoid LaunchElementwiseKernel(cudaStream_t stream, T* output_data, const FuncT& functor, size_t output_size) {\n if (output_size == 0) return;\n CUDA_LONG N = static_cast(output_size);\n int blocksPerGrid = CeilDiv(N, kThreadsPerBlock * kElementsPerThread);\n ElementwiseKernel<<>>(output_data, functor, N);\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#ifdef USE_ROCM\nconstexpr int kElementsPerThread = 2;\nconstexpr int kThreadsPerBlock = 512;\n#else\nconstexpr int kElementsPerThread = GridDim::maxElementsPerThread;\nconstexpr int kThreadsPerBlock = GridDim::maxThreadsPerBlock;\n#endif\n\ntemplate \n__global__ void ElementwiseKernel(T* output_data, const FuncT functor, HIP_LONG N) {\n HIP_LONG start = kElementsPerThread * kThreadsPerBlock * blockIdx.x + threadIdx.x;\n T value[kElementsPerThread];\n\n HIP_LONG id = start;\n#pragma unroll\n for (int i = 0; i < kElementsPerThread; ++i) {\n if (id < N) {\n value[i] = functor(id);\n id += kThreadsPerBlock;\n }\n }\n\n id = start;\n#pragma unroll\n for (int i = 0; i < kElementsPerThread; ++i) {\n if (id < N) {\n output_data[id] = value[i];\n id += kThreadsPerBlock;\n }\n }\n}\n\ntemplate \nvoid LaunchElementwiseKernel(hipStream_t stream, T* output_data, const FuncT& functor, size_t output_size) {\n if (output_size == 0) return;\n HIP_LONG N = static_cast(output_size);\n int blocksPerGrid = CeilDiv(N, kThreadsPerBlock * kElementsPerThread);\n ElementwiseKernel<<>>(output_data, functor, N);\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE double\n#include \"topk_impl.cuh\"\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE double\n#include \"topk_impl.cuh\"\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE int32_t\n#include \"topk_impl.cuh\"\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE int32_t\n#include \"topk_impl.cuh\"\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE int64_t\n#include \"topk_impl.cuh\"\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#define TOPK_IMPL_TYPE int64_t\n#include \"topk_impl.cuh\"\n###" }, { "cuda": "\n\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\nnamespace onnxruntime {\nnamespace cuda {\nstruct UnaryElementwisePreparation {\n const Tensor* input_tensor = nullptr;\n Tensor* output_tensor = nullptr;\n};\nclass UnaryElementwise : public CudaKernel {\n protected:\n UnaryElementwise(const OpKernelInfo& info) : CudaKernel(info) {}\n Status ComputeInternal(OpKernelContext*) const override {\n return Status(common::ONNXRUNTIME, common::FAIL); \n }\n Status Prepare(OpKernelContext* context, UnaryElementwisePreparation* p) const;\n};\ntemplate \nclass Abs final : public UnaryElementwise {\n public:\n Abs(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Neg final : public UnaryElementwise {\n public:\n Neg(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Floor final : public UnaryElementwise {\n public:\n Floor(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Ceil final : public UnaryElementwise {\n public:\n Ceil(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Reciprocal final : public UnaryElementwise {\n public:\n Reciprocal(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Sqrt final : public UnaryElementwise {\n public:\n Sqrt(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Log final : public UnaryElementwise {\n public:\n Log(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Exp final : public UnaryElementwise {\n public:\n Exp(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Erf final : public UnaryElementwise {\n public:\n Erf(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Not final : public UnaryElementwise {\n public:\n Not(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Round final : public UnaryElementwise {\n public:\n Round(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Sin final : public UnaryElementwise {\n public:\n Sin(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Cos final : public UnaryElementwise {\n public:\n Cos(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\nnamespace onnxruntime {\nnamespace rocm {\nstruct UnaryElementwisePreparation {\n const Tensor* input_tensor = nullptr;\n Tensor* output_tensor = nullptr;\n};\nclass 
UnaryElementwise : public RocmKernel {\n protected:\n UnaryElementwise(const OpKernelInfo& info) : RocmKernel(info) {}\n Status ComputeInternal(OpKernelContext*) const override {\n return Status(common::ONNXRUNTIME, common::FAIL); \n }\n Status Prepare(OpKernelContext* context, UnaryElementwisePreparation* p) const;\n};\ntemplate \nclass Abs final : public UnaryElementwise {\n public:\n Abs(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Neg final : public UnaryElementwise {\n public:\n Neg(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Floor final : public UnaryElementwise {\n public:\n Floor(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Ceil final : public UnaryElementwise {\n public:\n Ceil(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Reciprocal final : public UnaryElementwise {\n public:\n Reciprocal(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Sqrt final : public UnaryElementwise {\n public:\n Sqrt(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Log final : public UnaryElementwise {\n public:\n Log(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Exp final : public UnaryElementwise {\n public:\n Exp(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Erf final : public UnaryElementwise {\n public:\n Erf(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Not final : public UnaryElementwise {\n public:\n Not(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Round final : public UnaryElementwise {\n public:\n Round(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Sin final : public UnaryElementwise {\n public:\n Sin(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\ntemplate \nclass Cos final : public UnaryElementwise {\n public:\n Cos(const OpKernelInfo& info) : UnaryElementwise(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nusing InputTensorVector = std::vector>;\n\ntemplate \nclass VariadicElementwiseOp : public CudaKernel {\n public:\n VariadicElementwiseOp(const OpKernelInfo& info) : CudaKernel(info) {}\n\n private:\n Status ComputeInternal(OpKernelContext* context) const override;\n\n template \n struct NoBroadcastBatchImplDispatchTarget {\n Status operator()(cudaStream_t stream, const InputTensorVector& inputs, Tensor& output) const;\n };\n\n template \n struct BinaryImplDispatchTarget {\n Status operator()(cudaStream_t stream, const Tensor& lhs, const Tensor& rhs, Tensor& output) const;\n };\n\n template \n struct GeneralImplDispatchTarget {\n Status operator()(cudaStream_t stream, const InputTensorVector& inputs, Tensor& output) const;\n };\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nusing InputTensorVector = std::vector>;\n\ntemplate \nclass VariadicElementwiseOp : public RocmKernel {\n public:\n VariadicElementwiseOp(const OpKernelInfo& info) : RocmKernel(info) {}\n\n private:\n Status ComputeInternal(OpKernelContext* context) const override;\n\n template \n struct NoBroadcastBatchImplDispatchTarget {\n Status operator()(hipStream_t stream, const InputTensorVector& inputs, Tensor& output) const;\n };\n\n template \n struct BinaryImplDispatchTarget {\n Status operator()(hipStream_t stream, const Tensor& lhs, const Tensor& rhs, Tensor& output) const;\n };\n\n template \n struct GeneralImplDispatchTarget {\n Status operator()(hipStream_t stream, const InputTensorVector& inputs, Tensor& output) const;\n };\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid Impl_General(\n cudaStream_t stream,\n int32_t output_rank_or_simple_broadcast,\n const TArray* lhs_padded_strides,\n const T* lhs_data,\n const TArray* rhs_padded_strides,\n const T* rhs_data,\n const TArray* fdm_output_strides,\n const fast_divmod& fdm_H,\n const fast_divmod& fdm_C,\n T* output_data,\n size_t count);\n\nconstexpr int32_t k_max_input_batch_size = 8;\n\ntemplate \nusing InputBatchArray = TArray;\n\ntemplate \nvoid Impl_NoBroadcastInputBatch(\n cudaStream_t stream,\n InputBatchArray input_data_batch,\n T* output_data,\n size_t count);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid Impl_General(\n hipStream_t stream,\n int32_t output_rank_or_simple_broadcast,\n const TArray* lhs_padded_strides,\n const T* lhs_data,\n const TArray* rhs_padded_strides,\n const T* rhs_data,\n const TArray* fdm_output_strides,\n const fast_divmod& fdm_H,\n const fast_divmod& fdm_C,\n T* output_data,\n size_t count);\n\nconstexpr int32_t k_max_input_batch_size = 8;\n\ntemplate \nusing InputBatchArray = TArray;\n\ntemplate \nvoid Impl_NoBroadcastInputBatch(\n hipStream_t stream,\n InputBatchArray input_data_batch,\n T* output_data,\n size_t count);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\nnamespace cuda {\nnamespace variadic_elementwise_ops {\nstruct Sum {};\nstruct Min {};\nstruct Max {};\n} // namespace variadic_elementwise_ops\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\nnamespace rocm {\nnamespace variadic_elementwise_ops {\nstruct Sum {};\nstruct Min {};\nstruct Max {};\n} // namespace variadic_elementwise_ops\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/cudnn_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass BatchNorm final : public CudaKernel {\n public:\n BatchNorm(const OpKernelInfo& op_kernel_info)\n : CudaKernel{op_kernel_info},\n cudnn_batch_norm_mode_(CUDNN_BATCHNORM_SPATIAL),\n momentum_(0.9) {\n float tmp_epsilon;\n ORT_ENFORCE(op_kernel_info.GetAttr(\"epsilon\", &tmp_epsilon).IsOK());\n epsilon_ = ClampCudnnBatchNormEpsilon(static_cast(tmp_epsilon));\n\n // spatial or not\n int64_t tmp_spatial;\n if (op_kernel_info.GetAttr(\"spatial\", &tmp_spatial).IsOK()) {\n spatial_ = tmp_spatial;\n }\n\n if (spatial_ == 0) {\n cudnn_batch_norm_mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;\n }\n\n float tmp_momentum;\n if (op_kernel_info.GetAttr(\"momentum\", &tmp_momentum).IsOK()) {\n momentum_ = static_cast(tmp_momentum);\n }\n\n is_training_mode_ = (op_kernel_info.GetAttrOrDefault(\"training_mode\", 0) == 1);\n const auto& node = op_kernel_info.node();\n auto opset = node.SinceVersion();\n\n // batch norm opset 14 (or higher) is not implemented for training mode\n ORT_ENFORCE(!(is_training_mode_ && opset >= 14), \"Training mode does not support BN opset 14 (or higher) yet.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n double epsilon_;\n int64_t spatial_ = 1; // default as per spec\n cudnnBatchNormMode_t cudnn_batch_norm_mode_;\n double momentum_;\n bool is_training_mode_ = 0; // default as per spec\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/miopen_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass BatchNorm final : public RocmKernel {\n public:\n BatchNorm(const OpKernelInfo& op_kernel_info)\n : RocmKernel{op_kernel_info},\n miopen_batch_norm_mode_(miopenBNSpatial),\n momentum_(0.9) {\n float tmp_epsilon;\n ORT_ENFORCE(op_kernel_info.GetAttr(\"epsilon\", &tmp_epsilon).IsOK());\n epsilon_ = ClampMiopenBatchNormEpsilon(static_cast(tmp_epsilon));\n\n // spatial or not\n int64_t tmp_spatial;\n if (op_kernel_info.GetAttr(\"spatial\", &tmp_spatial).IsOK()) {\n spatial_ = tmp_spatial;\n }\n\n if (spatial_ == 0) {\n miopen_batch_norm_mode_ = miopenBNPerActivation;\n }\n\n float tmp_momentum;\n if (op_kernel_info.GetAttr(\"momentum\", &tmp_momentum).IsOK()) {\n momentum_ = static_cast(tmp_momentum);\n }\n\n is_training_mode_ = (op_kernel_info.GetAttrOrDefault(\"training_mode\", 0) == 1);\n const auto& node = op_kernel_info.node();\n auto opset = node.SinceVersion();\n\n // batch norm opset 14 (or higher) is not implemented for training mode\n ORT_ENFORCE(!(is_training_mode_ && opset >= 14), \"Training mode does not support BN opset 14 (or higher) yet.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n double epsilon_;\n int64_t spatial_ = 1; // default as per spec\n miopenBatchNormMode_t miopen_batch_norm_mode_;\n double momentum_;\n bool is_training_mode_ = 0; // default as per spec\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/common.h\"\n#include \"core/framework/random_generator.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Dropout final : public CudaKernel {\n public:\n Dropout(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t seed = 0;\n if (info.GetAttr(\"seed\", &seed).IsOK()) {\n generator_ = std::make_unique(static_cast(seed));\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n mutable std::unique_ptr generator_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/common.h\"\n#include \"core/framework/random_generator.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Dropout final : public RocmKernel {\n public:\n Dropout(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t seed = 0;\n if (info.GetAttr(\"seed\", &seed).IsOK()) {\n generator_ = std::make_unique(static_cast(seed));\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n mutable std::unique_ptr generator_;\n static constexpr float default_ratio_ = 0.5f;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/random_generator.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid DropoutKernelImpl(const cudaDeviceProp& prop, cudaStream_t stream, const int64_t N,\n const int64_t mask_element_count, const float ratio, PhiloxGenerator& generator, const T* X_data,\n T* Y_data, void* mask_data, bool use_bitmask);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/framework/random_generator.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid DropoutKernelImpl(const hipDeviceProp_t& prop, hipStream_t stream, const int64_t N,\n const int64_t mask_element_count, const float ratio, PhiloxGenerator& generator, const T* X_data,\n T* Y_data, void* mask_data, bool use_bitmask);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _UnaryElementWise(\n const InT* input_data,\n OutT* output_data,\n const FuncT functor,\n CUDA_LONG N) {\n CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;\n InT value[NumElementsPerThread];\n\n CUDA_LONG id = start;\n #pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n value[i] = input_data[id];\n id += NumThreadsPerBlock;\n }\n }\n\n id = start;\n #pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n output_data[id] = functor(value[i]);\n id += NumThreadsPerBlock;\n }\n }\n}\n\ntemplate \nvoid UnaryElementWiseImpl(\n cudaStream_t stream,\n const InT* input_data,\n OutT* output_data,\n const FuncT& func,\n size_t count) {\n if (count == 0) // special case where there's a dim value of 0 in the shape\n return;\n\n int blocksPerGrid = static_cast(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));\n CUDA_LONG N = static_cast(count);\n _UnaryElementWise\n <<>>(\n input_data,\n output_data,\n func,\n N);\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _UnaryElementWise(\n const InT* input_data,\n OutT* output_data,\n const FuncT functor,\n HIP_LONG N) {\n HIP_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;\n InT value[NumElementsPerThread];\n\n HIP_LONG id = start;\n #pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n value[i] = input_data[id];\n id += NumThreadsPerBlock;\n }\n }\n\n id = start;\n #pragma unroll\n for (int i = 0; i < NumElementsPerThread; i++) {\n if (id < N) {\n output_data[id] = functor(value[i]);\n id += NumThreadsPerBlock;\n }\n }\n}\n\ntemplate \nvoid UnaryElementWiseImpl(\n hipStream_t stream,\n const InT* input_data,\n OutT* output_data,\n const FuncT& func,\n size_t count) {\n if (count == 0) // special case where there's a dim value of 0 in the shape\n return;\n\n int blocksPerGrid = static_cast(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));\n HIP_LONG N = static_cast(count);\n _UnaryElementWise\n <<>>(\n input_data,\n output_data,\n func,\n N);\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/cudnn_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass InstanceNorm final : public CudaKernel {\n public:\n InstanceNorm(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n double epsilon_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/miopen_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass InstanceNorm final : public RocmKernel {\n public:\n InstanceNorm(const OpKernelInfo& op_kernel_info);\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n double epsilon_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"instance_norm_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _InstanceNormKernel(\n const T1* __restrict__ input_data,\n const T1* __restrict__ scale,\n const T1* __restrict__ bias,\n const T2* __restrict__ mean,\n const T2* __restrict__ variance,\n const double variance_correction,\n const double epsilon,\n const fast_divmod fdm_HW,\n const fast_divmod fdm_C,\n T1* __restrict__ output_data,\n const CUDA_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n int nc = fdm_HW.div(id);\n int n, c;\n fdm_C.divmod(nc, n, c);\n\n // Y = scale * (x - mean) / sqrt (std * std + epsilon) + B\n output_data[id] = scale[c] * (input_data[id] - (T1)mean[nc]) / _Sqrt((T1)variance[nc] * (T1)variance_correction + (T1)epsilon) + bias[c];\n}\n\ntemplate \nvoid InstanceNormImpl(\n cudaStream_t stream,\n const T1* input_data,\n const T1* scale,\n const T1* bias,\n const T2* mean,\n const T2* variance,\n const double variance_correction,\n const double epsilon,\n const fast_divmod& fdm_HW,\n const fast_divmod& fdm_C,\n T1* output_data,\n size_t N) {\n int blocksPerGrid = (int)(ceil(static_cast(N) / GridDim::maxThreadsPerBlock));\n _InstanceNormKernel<<>>(\n input_data, scale, bias, mean, variance, variance_correction, epsilon, fdm_HW, fdm_C, output_data, (CUDA_LONG)N);\n}\n\n#define SPECIALIZED_IMPL(T1, T2) \\\n template void InstanceNormImpl(cudaStream_t stream, const T1* input_data, const T1* scale, const T1* bias, const T2* mean, const T2* stddev, const double variance_correction, const double epsilon, const fast_divmod& fdm_HW, const fast_divmod& fdm_C, T1* output_data, size_t count);\n\nSPECIALIZED_IMPL(float, float)\nSPECIALIZED_IMPL(double, double)\n// When the input data type is float16, the means and variances will flow in as float32 (special case)\nSPECIALIZED_IMPL(half, float)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"instance_norm_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _InstanceNormKernel(\n const T1* __restrict__ input_data,\n const T1* __restrict__ scale,\n const T1* __restrict__ bias,\n const T2* __restrict__ mean,\n const T2* __restrict__ variance,\n const double variance_correction,\n const double epsilon,\n const fast_divmod fdm_HW,\n const fast_divmod fdm_C,\n T1* __restrict__ output_data,\n const HIP_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n int nc = fdm_HW.div(id);\n int n, c;\n fdm_C.divmod(nc, n, c);\n\n // Y = scale * (x - mean) / sqrt (std * std + epsilon) + B\n output_data[id] = scale[c] * (input_data[id] - (T1)mean[nc]) / _Sqrt((T1)variance[nc] * (T1)variance_correction + (T1)epsilon) + bias[c];\n}\n\ntemplate \nvoid InstanceNormImpl(\n hipStream_t stream,\n const T1* input_data,\n const T1* scale,\n const T1* bias,\n const T2* mean,\n const T2* variance,\n const double variance_correction,\n const double epsilon,\n const fast_divmod& fdm_HW,\n const fast_divmod& fdm_C,\n T1* output_data,\n size_t N) {\n int blocksPerGrid = (int)(ceil(static_cast(N) / GridDim::maxThreadsPerBlock));\n _InstanceNormKernel<<>>(\n input_data, scale, bias, mean, variance, variance_correction, epsilon, fdm_HW, fdm_C, output_data, (HIP_LONG)N);\n}\n\n#define SPECIALIZED_IMPL(T1, T2) \\\n template void InstanceNormImpl(hipStream_t stream, const T1* input_data, const T1* scale, const T1* bias, const T2* mean, const T2* stddev, const double variance_correction, const double epsilon, const fast_divmod& fdm_HW, const fast_divmod& fdm_C, T1* output_data, size_t count);\n\nSPECIALIZED_IMPL(float, float)\nSPECIALIZED_IMPL(double, double)\n// When the input data type is float16, the means and variances will flow in as float32 (special case)\nSPECIALIZED_IMPL(half, float)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/fast_divmod.h\"\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid InstanceNormImpl(\n cudaStream_t stream,\n const T1* input_data,\n const T1* scale,\n const T1* bias,\n const T2* mean,\n const T2* variance,\n const double variance_correction,\n const double epsilon,\n const fast_divmod& fdm_HW,\n const fast_divmod& fdm_C,\n T1* output_data,\n size_t count);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/fast_divmod.h\"\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid InstanceNormImpl(\n hipStream_t stream,\n const T1* input_data,\n const T1* scale,\n const T1* bias,\n const T2* mean,\n const T2* variance,\n const double variance_correction,\n const double epsilon,\n const fast_divmod& fdm_HW,\n const fast_divmod& fdm_C,\n T1* output_data,\n size_t count);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nusing namespace onnxruntime::cuda;\n\n// NOTE: This was originally a contrib op with 3 type constraints. 
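Illustrative note (not part of the dataset): _InstanceNormKernel above applies Y = scale[c] * (x - mean[nc]) / sqrt(variance[nc] * correction + epsilon) + bias[c], with nc and c recovered from the flat index via fast_divmod. The CPU reference below assumes per-(n, c) statistics are computed up front, mirroring how the kernel receives them; all names and sizes are illustrative.

// CPU reference for the per-element InstanceNorm math (sketch only).
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int Nb = 1, C = 2, H = 2, W = 2;          // tiny NCHW example
  const double eps = 1e-5, variance_correction = 1.0;
  std::vector<float> x = {1, 2, 3, 4, 10, 20, 30, 40};
  std::vector<float> scale = {1.0f, 0.5f}, bias = {0.0f, 1.0f};
  std::vector<float> mean(Nb * C), var(Nb * C), y(x.size());

  const int HW = H * W;
  for (int nc = 0; nc < Nb * C; ++nc) {           // per-(n, c) statistics
    double s = 0, s2 = 0;
    for (int i = 0; i < HW; ++i) { double v = x[nc * HW + i]; s += v; s2 += v * v; }
    const double mu = s / HW;
    mean[nc] = static_cast<float>(mu);
    var[nc] = static_cast<float>(s2 / HW - mu * mu);
  }
  for (size_t id = 0; id < x.size(); ++id) {      // same formula as the kernel
    const int nc = static_cast<int>(id) / HW;     // fdm_HW.div(id)
    const int c = nc % C;                         // fdm_C.divmod(nc, n, c)
    y[id] = static_cast<float>(
        scale[c] * (x[id] - mean[nc]) /
        std::sqrt(var[nc] * variance_correction + eps) + bias[c]);
  }
  for (float v : y) std::printf("%.4f ", v);
  std::printf("\n");
}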
The ONNX spec merges 'T' and 'V'.\n// the kernel is templatized on all three for backwards compatibility, but in ONNX usage T == V.\ntemplate \nclass LayerNorm final : public CudaKernel {\n public:\n LayerNorm(const OpKernelInfo& op_kernel_info);\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t axis_;\n double epsilon_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nusing namespace onnxruntime::rocm;\n\n// NOTE: This was originally a contrib op with 3 type constraints. The ONNX spec merges 'T' and 'V'.\n// the kernel is templatized on all three for backwards compatibility, but in ONNX usage T == V.\ntemplate \nclass LayerNorm final : public RocmKernel {\n public:\n LayerNorm(const OpKernelInfo& op_kernel_info);\n\n Status ComputeInternal(OpKernelContext* ctx) const override;\n\n private:\n int64_t axis_;\n double epsilon_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n/**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n//\n// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.\n// NVIDIA/apex is licensed under the\n// BSD 3 - Clause \"New\" or \"Revised\" License\n//\n\n/* Modifications Copyright (c) Microsoft. */\n\n#pragma once\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid HostApplyLayerNorm(\n const cudaDeviceProp& prop,\n cudaStream_t stream,\n V* output,\n U* mean,\n U* invvar,\n const T* input,\n int n1,\n int n2,\n double epsilon,\n const V* gamma,\n const V* beta,\n const T* skip = nullptr,\n const T* bias = nullptr,\n T* skip_input_bias_add_output = nullptr);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " /**\n * Copyright (c) 2016-present, Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n//\n// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.\n// NVIDIA/apex is licensed under the\n// BSD 3 - Clause \"New\" or \"Revised\" License\n//\n\n/* Modifications Copyright (c) Microsoft. 
*/\n\n#pragma once\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid HostApplyLayerNorm(\n const hipDeviceProp_t& prop,\n hipStream_t stream,\n V* output,\n U* mean,\n U* invvar,\n const T* input,\n int n1,\n int n2,\n double epsilon,\n const V* gamma,\n const V* beta,\n const T* skip = nullptr,\n const T* bias = nullptr,\n T* skip_input_bias_add_output = nullptr);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cuda/cudnn_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass CudnnLRNDescriptor final {\n public:\n CudnnLRNDescriptor();\n ~CudnnLRNDescriptor();\n Status Set(uint32_t N, double alpha, double beta, double K);\n operator cudnnLRNDescriptor_t() const { return desc_; }\n\n private:\n cudnnLRNDescriptor_t desc_;\n};\n\ntemplate \nclass LRN : public CudaKernel {\n public:\n LRN(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n CudnnLRNDescriptor norm_desc_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/rocm/miopen_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass MiopenLRNDescriptor final {\n public:\n MiopenLRNDescriptor();\n ~MiopenLRNDescriptor();\n Status Set(uint32_t N, double alpha, double beta, double K);\n operator miopenLRNDescriptor_t() const { return desc_; }\n\n private:\n miopenLRNDescriptor_t desc_;\n};\n\ntemplate \nclass LRN : public RocmKernel {\n public:\n LRN(const OpKernelInfo& info);\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n MiopenLRNDescriptor norm_desc_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n\n#include \"core/framework/tensor_shape.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nvoid MaxPoolWithIndex(\n cudaStream_t stream,\n const TensorShape& input_shape,\n const TensorShape& output_shape,\n const gsl::span& kernel_shape,\n const gsl::span& stride_shape,\n const gsl::span& pads,\n const gsl::span& dilations,\n int64_t storage_order,\n const T* p_input,\n T* p_output,\n int64_t* p_indices);\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n\n#include \"core/framework/tensor_shape.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nvoid MaxPoolWithIndex(\n hipStream_t stream,\n const TensorShape& input_shape,\n const TensorShape& output_shape,\n const gsl::span& kernel_shape,\n const gsl::span& stride_shape,\n const gsl::span& pads,\n const gsl::span& dilations,\n int64_t storage_order,\n const T* p_input,\n T* p_output,\n int64_t* p_indices);\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
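Illustrative note (not part of the dataset): HostApplyLayerNorm above is only a declaration; the sketch below shows the usual LayerNorm semantics its parameters suggest (n1 rows of n2 elements, each normalized with gamma/beta, with mean and invvar written out). This is a CPU illustration of the expected math under common assumptions, not the device implementation.

// CPU reference for row-wise LayerNorm with mean/invvar outputs (sketch only).
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int n1 = 2, n2 = 4;                       // rows x row width
  const double epsilon = 1e-5;
  std::vector<float> input = {1, 2, 3, 4, 2, 4, 6, 8};
  std::vector<float> gamma(n2, 1.0f), beta(n2, 0.0f);
  std::vector<float> output(n1 * n2), mean(n1), invvar(n1);

  for (int i = 0; i < n1; ++i) {
    double sum = 0, sqsum = 0;
    for (int j = 0; j < n2; ++j) { double v = input[i * n2 + j]; sum += v; sqsum += v * v; }
    const double mu = sum / n2;
    const double var = sqsum / n2 - mu * mu;      // biased variance, typical for LayerNorm
    mean[i] = static_cast<float>(mu);
    invvar[i] = static_cast<float>(1.0 / std::sqrt(var + epsilon));
    for (int j = 0; j < n2; ++j)
      output[i * n2 + j] = gamma[j] * (input[i * n2 + j] - mean[i]) * invvar[i] + beta[j];
  }
  for (int i = 0; i < n1 * n2; ++i) std::printf("%.4f ", output[i]);
  std::printf("\n");
}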
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"shrink.h\"\n#include \"shrink_impl.h\"\n#include \"core/providers/common.h\"\n\nusing namespace std;\nnamespace onnxruntime {\nnamespace cuda {\n\n#define SHRINK_REGISTER_KERNEL(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n Shrink, \\\n kOnnxDomain, \\\n 9, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .MayInplace(0, 0) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n Shrink);\n\ntemplate \nStatus Shrink::ComputeInternal(OpKernelContext* p_op_kernel_context) const {\n typedef typename ToCudaType::MappedType CudaT;\n\n const Tensor* X = p_op_kernel_context->Input(0);\n const auto* x_data = reinterpret_cast(X->Data());\n const TensorShape& x_shape = X->Shape();\n const size_t x_size = static_cast(x_shape.Size());\n\n Tensor* Y = p_op_kernel_context->Output(0, x_shape);\n auto* y_data = reinterpret_cast(Y->MutableData());\n\n ShrinkImpl(Stream(p_op_kernel_context), x_data, bias_, lambd_, y_data, x_size);\n\n return Status::OK();\n}\n\nSHRINK_REGISTER_KERNEL(float)\nSHRINK_REGISTER_KERNEL(double)\nSHRINK_REGISTER_KERNEL(MLFloat16)\nSHRINK_REGISTER_KERNEL(uint8_t)\nSHRINK_REGISTER_KERNEL(int8_t)\nSHRINK_REGISTER_KERNEL(uint16_t)\nSHRINK_REGISTER_KERNEL(int16_t)\nSHRINK_REGISTER_KERNEL(uint32_t)\nSHRINK_REGISTER_KERNEL(int32_t)\nSHRINK_REGISTER_KERNEL(uint64_t)\nSHRINK_REGISTER_KERNEL(int64_t)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"shrink.h\"\n#include \"shrink_impl.h\"\n#include \"core/providers/common.h\"\n\nusing namespace std;\nnamespace onnxruntime {\nnamespace rocm {\n\n#define SHRINK_REGISTER_KERNEL(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n Shrink, \\\n kOnnxDomain, \\\n 9, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .MayInplace(0, 0) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n Shrink);\n\ntemplate \nStatus Shrink::ComputeInternal(OpKernelContext* p_op_kernel_context) const {\n typedef typename ToHipType::MappedType HipT;\n\n const Tensor* X = p_op_kernel_context->Input(0);\n const auto* x_data = reinterpret_cast(X->Data());\n const TensorShape& x_shape = X->Shape();\n const size_t x_size = static_cast(x_shape.Size());\n\n Tensor* Y = p_op_kernel_context->Output(0, x_shape);\n auto* y_data = reinterpret_cast(Y->MutableData());\n\n ShrinkImpl(Stream(p_op_kernel_context), x_data, bias_, lambd_, y_data, x_size);\n\n return Status::OK();\n}\n\nSHRINK_REGISTER_KERNEL(float)\nSHRINK_REGISTER_KERNEL(double)\nSHRINK_REGISTER_KERNEL(MLFloat16)\nSHRINK_REGISTER_KERNEL(uint8_t)\nSHRINK_REGISTER_KERNEL(int8_t)\nSHRINK_REGISTER_KERNEL(uint16_t)\nSHRINK_REGISTER_KERNEL(int16_t)\nSHRINK_REGISTER_KERNEL(uint32_t)\nSHRINK_REGISTER_KERNEL(int32_t)\nSHRINK_REGISTER_KERNEL(uint64_t)\nSHRINK_REGISTER_KERNEL(int64_t)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Shrink final : public CudaKernel {\n public:\n Shrink(const OpKernelInfo& info) : CudaKernel(info) {\n float bias_temp;\n // if the attribute exists, use the value\n if (info.GetAttr(\"bias\", &bias_temp).IsOK())\n bias_ = bias_temp;\n\n float lambd_temp;\n // if the attribute exists, use the value\n if (info.GetAttr(\"lambd\", &lambd_temp).IsOK())\n lambd_ = lambd_temp;\n }\n\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const;\n\n private:\n float bias_ = 0.0f; // default as per spec\n float lambd_ = 0.5f; // default as per spec\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Shrink final : public RocmKernel {\n public:\n Shrink(const OpKernelInfo& info) : RocmKernel(info) {\n float bias_temp;\n // if the attribute exists, use the value\n if (info.GetAttr(\"bias\", &bias_temp).IsOK())\n bias_ = bias_temp;\n\n float lambd_temp;\n // if the attribute exists, use the value\n if (info.GetAttr(\"lambd\", &lambd_temp).IsOK())\n lambd_ = lambd_temp;\n }\n\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const;\n\n private:\n float bias_ = 0.0f; // default as per spec\n float lambd_ = 0.5f; // default as per spec\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"shrink_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n// Generic implementation of Shrink\ntemplate \n__global__ void _ShrinkKernel(\n const T* input_data,\n const float bias,\n const float lambda,\n T* output_data,\n const CUDA_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n T x = input_data[id];\n if (x < -lambda) {\n output_data[id] = (T)(x + bias);\n } else if (x > lambda) {\n output_data[id] = (T)(x - bias);\n } else {\n output_data[id] = (T)0;\n }\n}\n\n// Specialized implementation for 'half' type\n// the idea is to convert 'half' data to 'float' first,\n// do the operation and convert result back to 'half'\ntemplate <>\n__global__ void _ShrinkKernel(\n const half* input_data,\n const float bias,\n const float lambda,\n half* output_data,\n const CUDA_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n half x = input_data[id];\n if ((float)x < -lambda) {\n output_data[id] = half((float)x + bias);\n } else if ((float)x > lambda) {\n output_data[id] = half((float)x - bias);\n } else {\n output_data[id] = (half)0;\n }\n}\n\ntemplate \nvoid ShrinkImpl(\n cudaStream_t stream,\n const T* input_data,\n const float bias,\n const float lambda,\n T* output_data,\n size_t N) {\n int blocksPerGrid = (int)(ceil(static_cast(N) / GridDim::maxThreadsPerBlock));\n _ShrinkKernel<<>>(\n input_data, bias, lambda, output_data, (CUDA_LONG)N);\n}\n\n#define SPECIALIZED_IMPL(T) \\\n template void ShrinkImpl(cudaStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t 
N);\n\nSPECIALIZED_IMPL(float)\nSPECIALIZED_IMPL(double)\nSPECIALIZED_IMPL(half)\nSPECIALIZED_IMPL(uint8_t)\nSPECIALIZED_IMPL(int8_t)\nSPECIALIZED_IMPL(uint16_t)\nSPECIALIZED_IMPL(int16_t)\nSPECIALIZED_IMPL(uint32_t)\nSPECIALIZED_IMPL(int32_t)\nSPECIALIZED_IMPL(uint64_t)\nSPECIALIZED_IMPL(int64_t)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"shrink_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n// Generic implementation of Shrink\ntemplate \n__global__ void _ShrinkKernel(\n const T* input_data,\n const float bias,\n const float lambda,\n T* output_data,\n const HIP_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n T x = input_data[id];\n if (x < -lambda) {\n output_data[id] = (T)(x + bias);\n } else if (x > lambda) {\n output_data[id] = (T)(x - bias);\n } else {\n output_data[id] = (T)0;\n }\n}\n\n// Specialized implementation for 'half' type\n// the idea is to convert 'half' data to 'float' first,\n// do the operation and convert result back to 'half'\ntemplate <>\n__global__ void _ShrinkKernel(\n const half* input_data,\n const float bias,\n const float lambda,\n half* output_data,\n const HIP_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n half x = input_data[id];\n if ((float)x < -lambda) {\n output_data[id] = half((float)x + bias);\n } else if ((float)x > lambda) {\n output_data[id] = half((float)x - bias);\n } else {\n output_data[id] = (half)0;\n }\n}\n\ntemplate \nvoid ShrinkImpl(\n hipStream_t stream,\n const T* input_data,\n const float bias,\n const float lambda,\n T* output_data,\n size_t N) {\n int blocksPerGrid = (int)(ceil(static_cast(N) / GridDim::maxThreadsPerBlock));\n _ShrinkKernel<<>>(\n input_data, bias, lambda, output_data, (HIP_LONG)N);\n}\n\n#define SPECIALIZED_IMPL(T) \\\n template void ShrinkImpl(hipStream_t stream, const T* input_data, const float bias, const float lambda, T* output_data, size_t N);\n\nSPECIALIZED_IMPL(float)\nSPECIALIZED_IMPL(double)\nSPECIALIZED_IMPL(half)\nSPECIALIZED_IMPL(uint8_t)\nSPECIALIZED_IMPL(int8_t)\nSPECIALIZED_IMPL(uint16_t)\nSPECIALIZED_IMPL(int16_t)\nSPECIALIZED_IMPL(uint32_t)\nSPECIALIZED_IMPL(int32_t)\nSPECIALIZED_IMPL(uint64_t)\nSPECIALIZED_IMPL(int64_t)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n#pragma once\n\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate <\n typename T, typename Func,\n int32_t max_input_batch_size, int32_t num_elements_per_thread>\n__global__ void VariadicElementWiseNoBroadcastInputBatchKernel(\n Func func,\n size_t N,\n TArray inputs,\n T* output) {\n const size_t base_idx = num_elements_per_thread * blockDim.x * blockIdx.x + threadIdx.x;\n\n T inputs_buffer[num_elements_per_thread][max_input_batch_size];\n\n int32_t element_count;\n size_t element_idx;\n\n#pragma unroll\n for (element_count = 0, element_idx = base_idx;\n element_count < num_elements_per_thread;\n ++element_count, element_idx += blockDim.x) {\n if (element_idx < N) {\n#pragma unroll\n for (int32_t input_batch_idx = 0; input_batch_idx < max_input_batch_size; ++input_batch_idx) {\n if (input_batch_idx < inputs.Size()) {\n inputs_buffer[element_count][input_batch_idx] = inputs[input_batch_idx][element_idx];\n }\n }\n }\n }\n\n#pragma unroll\n for 
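Illustrative note (not part of the dataset): _ShrinkKernel above implements the ONNX Shrink formula with the defaults bias = 0.0 and lambd = 0.5 seen in the Shrink kernel class. A scalar CPU reference is sketched below.

// Scalar reference for the Shrink formula (sketch only).
#include <cstdio>

float shrink_ref(float x, float bias = 0.0f, float lambd = 0.5f) {
  if (x < -lambd) return x + bias;   // left tail: shift toward zero by bias
  if (x > lambd)  return x - bias;   // right tail: shift toward zero by bias
  return 0.0f;                       // inside [-lambd, lambd]: hard-threshold to zero
}

int main() {
  const float xs[] = {-2.0f, -0.4f, 0.0f, 0.3f, 1.5f};
  for (float x : xs) std::printf("shrink(%g) = %g\n", x, shrink_ref(x));
}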
(element_count = 0, element_idx = base_idx;\n element_count < num_elements_per_thread;\n ++element_count, element_idx += blockDim.x) {\n if (element_idx < N) {\n // first and second inputs\n T output_value = func(\n inputs_buffer[element_count][0], inputs_buffer[element_count][1]);\n\n // remaining inputs\n#pragma unroll\n for (int32_t input_batch_idx = 2; input_batch_idx < max_input_batch_size; ++input_batch_idx) {\n if (input_batch_idx < inputs.Size()) {\n output_value = func(output_value, inputs_buffer[element_count][input_batch_idx]);\n }\n }\n\n output[element_idx] = output_value;\n }\n }\n}\n\n// assumptions:\n// - inputs.Size() > 1 && inputs.Size() <= max_input_batch_size\n// - inputs and output have N elements\ntemplate \nvoid VariadicElementWiseNoBroadcastInputBatchImpl(\n cudaStream_t stream,\n Func func,\n size_t N,\n TArray inputs,\n T* output) {\n constexpr int32_t elements_per_thread = GridDim::maxElementsPerThread;\n constexpr int32_t threads_per_block = GridDim::maxThreadsPerBlock;\n const int32_t blocks_per_grid = static_cast(CeilDiv(N, elements_per_thread * threads_per_block));\n VariadicElementWiseNoBroadcastInputBatchKernel\n <<>>(func, N, inputs, output);\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n#pragma once\n\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate <\n typename T, typename Func,\n int32_t max_input_batch_size, int32_t num_elements_per_thread>\n__global__ void VariadicElementWiseNoBroadcastInputBatchKernel(\n Func func,\n size_t N,\n TArray inputs,\n T* output) {\n const size_t base_idx = num_elements_per_thread * blockDim.x * blockIdx.x + threadIdx.x;\n\n T inputs_buffer[num_elements_per_thread][max_input_batch_size];\n\n int32_t element_count;\n size_t element_idx;\n\n#pragma unroll\n for (element_count = 0, element_idx = base_idx;\n element_count < num_elements_per_thread;\n ++element_count, element_idx += blockDim.x) {\n if (element_idx < N) {\n#pragma unroll\n for (int32_t input_batch_idx = 0; input_batch_idx < max_input_batch_size; ++input_batch_idx) {\n if (input_batch_idx < inputs.Size()) {\n inputs_buffer[element_count][input_batch_idx] = inputs[input_batch_idx][element_idx];\n }\n }\n }\n }\n\n#pragma unroll\n for (element_count = 0, element_idx = base_idx;\n element_count < num_elements_per_thread;\n ++element_count, element_idx += blockDim.x) {\n if (element_idx < N) {\n // first and second inputs\n T output_value = func(\n inputs_buffer[element_count][0], inputs_buffer[element_count][1]);\n\n // remaining inputs\n#pragma unroll\n for (int32_t input_batch_idx = 2; input_batch_idx < max_input_batch_size; ++input_batch_idx) {\n if (input_batch_idx < inputs.Size()) {\n output_value = func(output_value, inputs_buffer[element_count][input_batch_idx]);\n }\n }\n\n output[element_idx] = output_value;\n }\n }\n}\n\n// assumptions:\n// - inputs.Size() > 1 && inputs.Size() <= max_input_batch_size\n// - inputs and output have N elements\ntemplate \nvoid VariadicElementWiseNoBroadcastInputBatchImpl(\n hipStream_t stream,\n Func func,\n size_t N,\n TArray inputs,\n T* output) {\n constexpr int32_t elements_per_thread = GridDim::maxElementsPerThread;\n constexpr int32_t threads_per_block = GridDim::maxThreadsPerBlock;\n const int32_t blocks_per_grid = static_cast(CeilDiv(N, elements_per_thread * threads_per_block));\n VariadicElementWiseNoBroadcastInputBatchKernel\n <<>>(func, N, 
inputs, output);\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid ShrinkImpl(\n cudaStream_t stream,\n const T* input_data,\n const float bias,\n const float lambda,\n T* output_data,\n size_t count);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid ShrinkImpl(\n hipStream_t stream,\n const T* input_data,\n const float bias,\n const float lambda,\n T* output_data,\n size_t count);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_NVTX_PROFILE\n#include \"nvtx_profile.h\"\n#include \"core/common/common.h\"\n#include \n#include \n\nnamespace onnxruntime {\nnamespace profile {\n\nvoid NvtxRangeCreator::BeginImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxEventAttributes_t eventAttrib;\n eventAttrib.version = NVTX_VERSION;\n eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;\n eventAttrib.colorType = NVTX_COLOR_ARGB;\n eventAttrib.color = static_cast(color_);\n eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;\n eventAttrib.message.ascii = message_.c_str();\n\n range_id_ = nvtxRangeStartEx(&eventAttrib);\n}\n\nvoid NvtxRangeCreator::EndImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxRangeEnd(range_id_);\n}\n\nvoid NvtxNestedRangeCreator::BeginImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxEventAttributes_t eventAttrib;\n eventAttrib.version = NVTX_VERSION;\n eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;\n eventAttrib.colorType = NVTX_COLOR_ARGB;\n eventAttrib.color = static_cast(color_);\n eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;\n eventAttrib.message.ascii = message_.c_str();\n\n nvtxRangePushEx(&eventAttrib);\n}\n\nvoid NvtxNestedRangeCreator::EndImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxRangePop();\n}\n\nvoid NvtxMarkerCreator::Mark() {\n // enable only for debug builds because this function is for profiling only.\n nvtxEventAttributes_t eventAttrib;\n eventAttrib.version = NVTX_VERSION;\n eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;\n eventAttrib.colorType = NVTX_COLOR_ARGB;\n eventAttrib.color = static_cast(color_);\n eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;\n eventAttrib.message.ascii = message_.c_str();\n\n nvtxMarkEx(&eventAttrib);\n}\n\n} // namespace profile\n} // namespace onnxruntime\n\n#endif\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
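Illustrative note (not part of the dataset): VariadicElementWiseNoBroadcastInputBatchKernel above folds an arbitrary number of same-shaped inputs with a binary functor, out[i] = func(...func(func(in0[i], in1[i]), in2[i])...). The CPU sketch below shows that fold with an example sum functor; names are illustrative.

// CPU model of the variadic no-broadcast fold (sketch only).
#include <cstdio>
#include <vector>

int main() {
  auto func = [](float a, float b) { return a + b; };  // e.g. a variadic Sum
  std::vector<std::vector<float>> inputs = {
      {1, 2, 3}, {10, 20, 30}, {100, 200, 300}};       // inputs.Size() == 3
  const size_t N = inputs[0].size();
  std::vector<float> output(N);

  for (size_t i = 0; i < N; ++i) {
    float acc = func(inputs[0][i], inputs[1][i]);       // first and second inputs
    for (size_t k = 2; k < inputs.size(); ++k)          // remaining inputs
      acc = func(acc, inputs[k][i]);
    output[i] = acc;
  }
  for (float v : output) std::printf("%g ", v);         // expected: 111 222 333
  std::printf("\n");
}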
All rights reserved.\n// Licensed under the MIT License.\n\n#ifdef ENABLE_NVTX_PROFILE\n#include \"nvtx_profile.h\"\n#include \"core/common/common.h\"\n#include \n#include \n\nnamespace onnxruntime {\nnamespace profile {\n\nvoid NvtxRangeCreator::BeginImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxEventAttributes_t eventAttrib;\n eventAttrib.version = NVTX_VERSION;\n eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;\n eventAttrib.colorType = NVTX_COLOR_ARGB;\n eventAttrib.color = static_cast(color_);\n eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;\n eventAttrib.message.ascii = message_.c_str();\n\n range_id_ = nvtxRangeStartEx(&eventAttrib);\n}\n\nvoid NvtxRangeCreator::EndImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxRangeEnd(range_id_);\n}\n\nvoid NvtxNestedRangeCreator::BeginImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxEventAttributes_t eventAttrib;\n eventAttrib.version = NVTX_VERSION;\n eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;\n eventAttrib.colorType = NVTX_COLOR_ARGB;\n eventAttrib.color = static_cast(color_);\n eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;\n eventAttrib.message.ascii = message_.c_str();\n\n nvtxRangePushEx(&eventAttrib);\n}\n\nvoid NvtxNestedRangeCreator::EndImpl() {\n // enable only for debug builds because this function is for profiling only.\n nvtxRangePop();\n}\n\nvoid NvtxMarkerCreator::Mark() {\n // enable only for debug builds because this function is for profiling only.\n nvtxEventAttributes_t eventAttrib;\n eventAttrib.version = NVTX_VERSION;\n eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;\n eventAttrib.colorType = NVTX_COLOR_ARGB;\n eventAttrib.color = static_cast(color_);\n eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII;\n eventAttrib.message.ascii = message_.c_str();\n\n nvtxMarkEx(&eventAttrib);\n}\n\n} // namespace profile\n} // namespace onnxruntime\n\n#endif###" }, { "cuda": "\n\n\n\n\n\n\n\n#ifdef ENABLE_NVTX_PROFILE\n#pragma once\n#include \n#include \n#include \n#include \n#include \n#include \"core/common/common.h\"\nnamespace onnxruntime {\nnamespace profile {\n\n\n\n\n\n\nenum class Color : uint32_t {\n Black = 0x00000000, Red = 0x00ff0000, DarkGreen = 0x00009900, Green = 0x0000ff00, LightGreen = 0x00ccffcc, Blue = 0x000000ff, Amber = 0x00ffbf00, LightAmber = 0x00fff2cc, White = 0x00ffffff, Cyan = 0x0000ffff, Magenta = 0x00ff00ff, Yellow = 0x00ffff00, };\nclass RangeCreatorBase {\n public:\n RangeCreatorBase(const std::string message, const Color color)\n : message_(message), color_(color), is_begin_called_(false), is_end_called_(false){};\n \n \n ~RangeCreatorBase() {\n if (!is_begin_called_) {\n std::cerr << \"Begin must be called once.\" << std::endl;\n }\n if (!is_end_called_) {\n std::cerr << \"End must be called once.\" << std::endl;\n }\n }\n \n void Begin() {\n ORT_ENFORCE(!is_begin_called_, \"Begin cannot be called more than once.\");\n ORT_ENFORCE(!is_end_called_, \"Begin cannot be called after calling End.\");\n BeginImpl();\n is_begin_called_ = true;\n }\n \n void End() {\n ORT_ENFORCE(is_begin_called_, \"End must be called after calling Begin.\");\n ORT_ENFORCE(!is_end_called_, \"End cannot be called more than once.\");\n EndImpl();\n is_end_called_ = true;\n }\n bool IsBeginCalled() const {\n return is_begin_called_;\n }\n bool IsEndCalled() const {\n return is_end_called_;\n }\n virtual void BeginImpl() = 0;\n virtual void EndImpl() = 0;\n protected:\n \n const 
std::string message_;\n \n const Color color_;\n bool is_begin_called_;\n bool is_end_called_;\n};\nclass NvtxRangeCreator final : public RangeCreatorBase {\n public:\n NvtxRangeCreator(const std::string message, const Color color)\n : RangeCreatorBase(message, color){};\n void BeginImpl() override;\n void EndImpl() override;\n private:\n \n \n uint64_t range_id_;\n};\nclass NvtxNestedRangeCreator final : public RangeCreatorBase {\n public:\n NvtxNestedRangeCreator(const std::string message, const Color color)\n : RangeCreatorBase(message, color){};\n void BeginImpl() override;\n void EndImpl() override;\n};\nclass NvtxMarkerCreator final {\n public:\n NvtxMarkerCreator(const std::string message, const Color color)\n : message_(message), color_(color){};\n void Mark();\n private:\n \n const std::string message_;\n \n const Color color_;\n};\n} \n} \n#endif\n\n###", "hip": " \n\n\n\n\n\n\n#ifdef ENABLE_NVTX_PROFILE\n#pragma once\n#include \n#include \n#include \n#include \n#include \n#include \"core/common/common.h\"\nnamespace onnxruntime {\nnamespace profile {\n\n\n\n\n\n\nenum class Color : uint32_t {\n Black = 0x00000000, Red = 0x00ff0000, DarkGreen = 0x00009900, Green = 0x0000ff00, LightGreen = 0x00ccffcc, Blue = 0x000000ff, Amber = 0x00ffbf00, LightAmber = 0x00fff2cc, White = 0x00ffffff, Cyan = 0x0000ffff, Magenta = 0x00ff00ff, Yellow = 0x00ffff00, };\nclass RangeCreatorBase {\n public:\n RangeCreatorBase(const std::string message, const Color color)\n : message_(message), color_(color), is_begin_called_(false), is_end_called_(false){};\n \n \n ~RangeCreatorBase() {\n if (!is_begin_called_) {\n std::cerr << \"Begin must be called once.\" << std::endl;\n }\n if (!is_end_called_) {\n std::cerr << \"End must be called once.\" << std::endl;\n }\n }\n \n void Begin() {\n ORT_ENFORCE(!is_begin_called_, \"Begin cannot be called more than once.\");\n ORT_ENFORCE(!is_end_called_, \"Begin cannot be called after calling End.\");\n BeginImpl();\n is_begin_called_ = true;\n }\n \n void End() {\n ORT_ENFORCE(is_begin_called_, \"End must be called after calling Begin.\");\n ORT_ENFORCE(!is_end_called_, \"End cannot be called more than once.\");\n EndImpl();\n is_end_called_ = true;\n }\n bool IsBeginCalled() const {\n return is_begin_called_;\n }\n bool IsEndCalled() const {\n return is_end_called_;\n }\n virtual void BeginImpl() = 0;\n virtual void EndImpl() = 0;\n protected:\n \n const std::string message_;\n \n const Color color_;\n bool is_begin_called_;\n bool is_end_called_;\n};\nclass NvtxRangeCreator final : public RangeCreatorBase {\n public:\n NvtxRangeCreator(const std::string message, const Color color)\n : RangeCreatorBase(message, color){};\n void BeginImpl() override;\n void EndImpl() override;\n private:\n \n \n uint64_t range_id_;\n};\nclass NvtxNestedRangeCreator final : public RangeCreatorBase {\n public:\n NvtxNestedRangeCreator(const std::string message, const Color color)\n : RangeCreatorBase(message, color){};\n void BeginImpl() override;\n void EndImpl() override;\n};\nclass NvtxMarkerCreator final {\n public:\n NvtxMarkerCreator(const std::string message, const Color color)\n : message_(message), color_(color){};\n void Mark();\n private:\n \n const std::string message_;\n \n const Color color_;\n};\n} \n} \n#endif###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
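Illustrative note (not part of the dataset): RangeCreatorBase above enforces a strict Begin-once / End-once protocol around NVTX range calls. The stand-alone sketch below models only that call-order contract with a hypothetical MockRange class so it compiles without NVTX or ORT; it is not the real profiler code.

// Stand-alone model of the Begin/End contract (sketch only).
#include <cstdio>
#include <stdexcept>
#include <string>

class MockRange {                                      // stand-in for NvtxRangeCreator
 public:
  explicit MockRange(const std::string& msg) : msg_(msg) {}
  void Begin() {
    if (begun_) throw std::logic_error("Begin cannot be called more than once.");
    if (ended_) throw std::logic_error("Begin cannot be called after End.");
    begun_ = true;
    std::printf("range start: %s\n", msg_.c_str());    // nvtxRangeStartEx in the real class
  }
  void End() {
    if (!begun_) throw std::logic_error("End must be called after Begin.");
    if (ended_) throw std::logic_error("End cannot be called more than once.");
    ended_ = true;
    std::printf("range end:   %s\n", msg_.c_str());    // nvtxRangeEnd in the real class
  }
 private:
  std::string msg_;
  bool begun_ = false, ended_ = false;
};

int main() {
  MockRange fw("forward pass");
  fw.Begin();
  // ... profiled work would go here ...
  fw.End();
}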
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n#include \n\n#include \"core/platform/ort_mutex.h\"\n\n#ifdef ENABLE_NVTX_PROFILE\n\nnamespace onnxruntime {\nnamespace profile {\n\n// Singleton class of managing global NVTX profiling information.\nclass Context {\n public:\n static Context& GetInstance() {\n static Context instance_;\n return instance_;\n }\n\n // Return tag for the specified thread.\n // If the thread's tag doesn't exist, this function returns an empty string.\n std::string GetThreadTagOrDefault(const std::thread::id& thread_id) {\n const std::lock_guard lock(mtx_);\n return thread_tag_[thread_id];\n }\n\n // Set tag for the specified thread.\n void SetThreadTag(\n const std::thread::id& thread_id, const std::string& tag) {\n const std::lock_guard lock(mtx_);\n thread_tag_[thread_id] = tag;\n }\n\n private:\n Context() = default;\n ~Context() = default;\n Context(const Context&) = delete;\n Context& operator=(const Context&) = delete;\n\n // map from thread's id to its human-readable tag.\n std::unordered_map thread_tag_;\n OrtMutex mtx_;\n};\n\n} // namespace profile\n} // namespace onnxruntime\n\n#endif\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \n#include \n\n#include \"core/platform/ort_mutex.h\"\n\n#ifdef ENABLE_NVTX_PROFILE\n\nnamespace onnxruntime {\nnamespace profile {\n\n// Singleton class of managing global NVTX profiling information.\nclass Context {\n public:\n static Context& GetInstance() {\n static Context instance_;\n return instance_;\n }\n\n // Return tag for the specified thread.\n // If the thread's tag doesn't exist, this function returns an empty string.\n std::string GetThreadTagOrDefault(const std::thread::id& thread_id) {\n const std::lock_guard lock(mtx_);\n return thread_tag_[thread_id];\n }\n\n // Set tag for the specified thread.\n void SetThreadTag(\n const std::thread::id& thread_id, const std::string& tag) {\n const std::lock_guard lock(mtx_);\n thread_tag_[thread_id] = tag;\n }\n\n private:\n Context() = default;\n ~Context() = default;\n Context(const Context&) = delete;\n Context& operator=(const Context&) = delete;\n\n // map from thread's id to its human-readable tag.\n std::unordered_map thread_tag_;\n OrtMutex mtx_;\n};\n\n} // namespace profile\n} // namespace onnxruntime\n\n#endif\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/object_detection/non_max_suppression.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct NonMaxSuppression final : public CudaKernel, public NonMaxSuppressionBase {\n explicit NonMaxSuppression(const OpKernelInfo& info) : CudaKernel(info), NonMaxSuppressionBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(NonMaxSuppression);\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
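Illustrative note (not part of the dataset): the profile::Context singleton above maps thread ids to human-readable tags under a mutex. The sketch below shows that usage pattern with a hypothetical TagRegistry stand-in built on std::mutex so it runs without OrtMutex.

// Stand-alone model of the thread-tag registry usage (sketch only).
#include <cstdio>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>

class TagRegistry {                                   // stand-in for profile::Context
 public:
  static TagRegistry& GetInstance() { static TagRegistry instance; return instance; }
  std::string GetThreadTagOrDefault(const std::thread::id& id) {
    const std::lock_guard<std::mutex> lock(mtx_);
    return tags_[id];                                 // empty string if never set
  }
  void SetThreadTag(const std::thread::id& id, const std::string& tag) {
    const std::lock_guard<std::mutex> lock(mtx_);
    tags_[id] = tag;
  }
 private:
  std::unordered_map<std::thread::id, std::string> tags_;
  std::mutex mtx_;
};

int main() {
  std::thread worker([] {
    TagRegistry::GetInstance().SetThreadTag(std::this_thread::get_id(), "worker-0");
    std::printf("tag: %s\n",
        TagRegistry::GetInstance().GetThreadTagOrDefault(std::this_thread::get_id()).c_str());
  });
  worker.join();
}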
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/object_detection/non_max_suppression.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct NonMaxSuppression final : public RocmKernel, public NonMaxSuppressionBase {\n explicit NonMaxSuppression(const OpKernelInfo& info) : RocmKernel(info), NonMaxSuppressionBase(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(NonMaxSuppression);\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \n#include \"core/providers/cpu/object_detection/non_max_suppression_helper.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus NonMaxSuppressionImpl(\n cudaStream_t stream,\n std::function(size_t)> allocator,\n const PrepareContext& pc,\n const int64_t center_point_box,\n int64_t batch_index,\n int64_t class_index,\n int max_output_boxes_per_class,\n float iou_threshold,\n float score_threshold,\n IAllocatorUniquePtr& selected_indices,\n int* h_number_selected);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \n#include \"core/providers/cpu/object_detection/non_max_suppression_helper.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus NonMaxSuppressionImpl(\n hipStream_t stream,\n std::function(size_t)> allocator,\n const PrepareContext& pc,\n const int64_t center_point_box,\n int64_t batch_index,\n int64_t class_index,\n int max_output_boxes_per_class,\n float iou_threshold,\n float score_threshold,\n IAllocatorUniquePtr& selected_indices,\n int* h_number_selected);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"roialign.h\"\n#include \"roialign_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n#define REGISTER_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n RoiAlign, \\\n kOnnxDomain, \\\n 10, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"T2\", DataTypeImpl::GetTensorType()), \\\n RoiAlign);\n\ntemplate \nStatus RoiAlign::ComputeInternal(OpKernelContext* context) const {\n // X\n const auto* X_ptr = context->Input(0);\n // rois\n const auto* rois_ptr = context->Input(1);\n // batch indices\n const auto* batch_indices_ptr = context->Input(2);\n\n const auto& x_dims = X_ptr->Shape();\n const auto& rois_dims = rois_ptr->Shape();\n const auto& batch_indices_dims = batch_indices_ptr->Shape();\n\n auto num_rois = batch_indices_dims[0];\n auto num_roi_cols = rois_dims[1];\n\n auto status = CheckROIAlignValidInput(X_ptr, rois_ptr, batch_indices_ptr);\n if (status != Status::OK()) {\n return status;\n }\n\n Tensor& Y = *context->Output(0, {num_rois, x_dims[1], this->output_height_, this->output_width_});\n int64_t output_size = Y.Shape().Size();\n\n if (output_size > 0) {\n RoiAlignImpl(\n Stream(context),\n output_size, // num threads\n reinterpret_cast::MappedType*>(X_ptr->Data()),\n ToCudaType::FromFloat(this->spatial_scale_),\n x_dims[1], // num channels\n x_dims[2], // height\n x_dims[3], // width\n this->output_height_,\n this->output_width_,\n this->sampling_ratio_,\n reinterpret_cast::MappedType*>(rois_ptr->Data()),\n num_roi_cols,\n reinterpret_cast::MappedType*>(Y.MutableData()),\n this->mode_ == RoiAlignMode::avg,\n this->half_pixel_,\n batch_indices_ptr->Data());\n }\n\n return Status::OK();\n}\n\n#define SPECIALIZED_COMPUTE(T) \\\n REGISTER_KERNEL_TYPED(T) \\\n template Status RoiAlign::ComputeInternal(OpKernelContext* ctx) const;\n\nSPECIALIZED_COMPUTE(float)\nSPECIALIZED_COMPUTE(double)\n// SPECIALIZED_COMPUTE(MLFloat16)\n\n} // namespace cuda\n}; // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"roialign.h\"\n#include \"roialign_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n#define REGISTER_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n RoiAlign, \\\n kOnnxDomain, \\\n 10, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()) \\\n .TypeConstraint(\"T2\", DataTypeImpl::GetTensorType()), \\\n RoiAlign);\n\ntemplate \nStatus RoiAlign::ComputeInternal(OpKernelContext* context) const {\n // X\n const auto* X_ptr = context->Input(0);\n // rois\n const auto* rois_ptr = context->Input(1);\n // batch indices\n const auto* batch_indices_ptr = context->Input(2);\n\n const auto& x_dims = X_ptr->Shape();\n const auto& rois_dims = rois_ptr->Shape();\n const auto& batch_indices_dims = batch_indices_ptr->Shape();\n\n auto num_rois = batch_indices_dims[0];\n auto num_roi_cols = rois_dims[1];\n\n auto status = CheckROIAlignValidInput(X_ptr, rois_ptr, batch_indices_ptr);\n if (status != Status::OK()) {\n return status;\n }\n\n Tensor& Y = *context->Output(0, {num_rois, x_dims[1], this->output_height_, this->output_width_});\n int64_t output_size = Y.Shape().Size();\n\n if (output_size > 0) {\n RoiAlignImpl(\n Stream(context),\n output_size, // num threads\n reinterpret_cast::MappedType*>(X_ptr->Data()),\n ToHipType::FromFloat(this->spatial_scale_),\n x_dims[1], // num channels\n x_dims[2], // height\n x_dims[3], // width\n this->output_height_,\n this->output_width_,\n this->sampling_ratio_,\n reinterpret_cast::MappedType*>(rois_ptr->Data()),\n num_roi_cols,\n reinterpret_cast::MappedType*>(Y.MutableData()),\n this->mode_ == RoiAlignMode::avg,\n this->half_pixel_,\n batch_indices_ptr->Data());\n }\n\n return Status::OK();\n}\n\n#define SPECIALIZED_COMPUTE(T) \\\n REGISTER_KERNEL_TYPED(T) \\\n template Status RoiAlign::ComputeInternal(OpKernelContext* ctx) const;\n\nSPECIALIZED_COMPUTE(float)\nSPECIALIZED_COMPUTE(double)\n// SPECIALIZED_COMPUTE(MLFloat16)\n\n} // namespace rocm\n}; // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/object_detection/roialign.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nstruct RoiAlign final : CudaKernel, RoiAlignBase {\n RoiAlign(const OpKernelInfo& info) : CudaKernel(info), RoiAlignBase(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RoiAlign);\n};\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/object_detection/roialign.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nstruct RoiAlign final : RocmKernel, RoiAlignBase {\n RoiAlign(const OpKernelInfo& info) : RocmKernel(info), RoiAlignBase(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RoiAlign);\n};\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid RoiAlignImpl(\n cudaStream_t stream,\n const int64_t nthreads,\n const T* bottom_data,\n const T spatial_scale,\n const int64_t channels,\n const int64_t height,\n const int64_t width,\n const int64_t pooled_height,\n const int64_t pooled_width,\n const int64_t sampling_ratio,\n const T* bottom_rois,\n int64_t roi_cols,\n T* top_data,\n const bool is_mode_avg,\n const bool half_pixel,\n const int64_t* batch_indices_ptr);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid RoiAlignImpl(\n hipStream_t stream,\n const int64_t nthreads,\n const T* bottom_data,\n const T spatial_scale,\n const int64_t channels,\n const int64_t height,\n const int64_t width,\n const int64_t pooled_height,\n const int64_t pooled_width,\n const int64_t sampling_ratio,\n const T* bottom_rois,\n int64_t roi_cols,\n T* top_data,\n const bool is_mode_avg,\n const bool half_pixel,\n const int64_t* batch_indices_ptr);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#pragma once\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/shared_inc/accumulation_type.h\"\nnamespace onnxruntime {\nnamespace cuda {\nnamespace detail {\nsize_t compute_reduce_matrix_columns_intermediate_buffer_size(\n int element_size, int num_rows, int num_cols);\n} \n\ntemplate \nsize_t compute_reduce_matrix_columns_buffer_size(int m, int n) {\n using TBuf = AccumulationType_t;\n return detail::compute_reduce_matrix_columns_intermediate_buffer_size(\n sizeof(TBuf), m, n);\n}\n\ntemplate \nsize_t compute_reduction_buffer_size(int size) {\n using TBuf = AccumulationType_t;\n return detail::compute_reduce_matrix_columns_intermediate_buffer_size(\n sizeof(TBuf), 1, size);\n}\n\ntemplate \nStatus reduce_sum(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\n\ntemplate \nStatus reduce_square_sum(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\n\ntemplate \nStatus reduce_l2_norm(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\n\ntemplate \nStatus reduce_mean(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\nenum class ApplicableMatrixReduction {\n \n Rows, Columns, None, };\n\nApplicableMatrixReduction get_applicable_matrix_reduction(\n const cudnnReduceTensorOp_t cudnn_reduce_op, gsl::span dims, gsl::span axes, int& m, int& n);\n\ntemplate \nStatus reduce_matrix_rows(cudaStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output = true);\n\ntemplate \nStatus reduce_matrix_columns(cudaStream_t stream, const TIn* input, TOut* output, int m, int n, void* buffer, size_t buffer_size);\n\ntemplate \nvoid UnaryDiv(cudaStream_t stream, const T* input, T* output, T denominator, size_t count);\n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/shared_inc/accumulation_type.h\"\nnamespace onnxruntime {\nnamespace rocm {\nnamespace detail {\nsize_t 
compute_reduce_matrix_columns_intermediate_buffer_size(\n int element_size, int num_rows, int num_cols);\n} \n\ntemplate \nsize_t compute_reduce_matrix_columns_buffer_size(int m, int n) {\n using TBuf = AccumulationType_t;\n return detail::compute_reduce_matrix_columns_intermediate_buffer_size(\n sizeof(TBuf), m, n);\n}\n\ntemplate \nsize_t compute_reduction_buffer_size(int size) {\n using TBuf = AccumulationType_t;\n return detail::compute_reduce_matrix_columns_intermediate_buffer_size(\n sizeof(TBuf), 1, size);\n}\n\ntemplate \nStatus reduce_sum(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\n\ntemplate \nStatus reduce_square_sum(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\n\ntemplate \nStatus reduce_l2_norm(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\n\ntemplate \nStatus reduce_mean(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size);\nenum class ApplicableMatrixReduction {\n \n Rows, Columns, None, };\n\nApplicableMatrixReduction get_applicable_matrix_reduction(\n const miopenReduceTensorOp_t miopen_reduce_op, gsl::span dims, gsl::span axes, int& m, int& n);\n\ntemplate \nStatus reduce_matrix_rows(hipStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output = true);\n\ntemplate \nStatus reduce_matrix_columns(hipStream_t stream, const TIn* input, TOut* output, int m, int n, void* buffer, size_t buffer_size);\n\ntemplate \nvoid UnaryDiv(hipStream_t stream, const T* input, T* output, T denominator, size_t count);\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cuda_check_memory.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nvoid CheckIfMemoryOnCurrentGpuDevice(const void* ptr) {\n cudaPointerAttributes attrs;\n CUDA_CALL_THROW(cudaPointerGetAttributes(&attrs, ptr));\n int current_device;\n CUDA_CALL_THROW(cudaGetDevice(¤t_device));\n ORT_ENFORCE(attrs.device == current_device,\n \"Current CUDA device is \", current_device,\n \" but the memory of pointer \", ptr,\n \" is allocated on device \", attrs.device);\n}\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/rocm_check_memory.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nvoid CheckIfMemoryOnCurrentGpuDevice(const void* ptr) {\n hipPointerAttribute_t attrs;\n HIP_CALL_THROW(hipPointerGetAttributes(&attrs, ptr));\n int current_device;\n HIP_CALL_THROW(hipGetDevice(¤t_device));\n ORT_ENFORCE(attrs.device == current_device,\n \"Current ROCM device is \", current_device,\n \" but the memory of pointer \", ptr,\n \" is allocated on device \", attrs.device);\n}\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n__forceinline__ __host__ __device__ int least_pow2_bound(int value) {\n unsigned int value_ = static_cast(value);\n --value_;\n value_ |= value_ >> 1;\n value_ |= value_ >> 2;\n value_ |= value_ >> 4;\n value_ |= value_ >> 8;\n value_ |= value_ >> 16;\n return static_cast(++value_);\n}\n\nstruct Square {\n template \n __forceinline__ __device__ T operator()(const T& value) {\n return value * value;\n }\n};\n\nstruct Sqrt {\n template \n __forceinline__ __device__ T operator()(const T& value) {\n return _Sqrt(value);\n }\n};\n\nstruct Identity {\n template \n __forceinline__ __device__ T operator()(const T& value) {\n return value;\n }\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n__forceinline__ __host__ __device__ int least_pow2_bound(int value) {\n unsigned int value_ = static_cast(value);\n --value_;\n value_ |= value_ >> 1;\n value_ |= value_ >> 2;\n value_ |= value_ >> 4;\n value_ |= value_ >> 8;\n value_ |= value_ >> 16;\n return static_cast(++value_);\n}\n\nstruct Square {\n template \n __forceinline__ __device__ T operator()(const T& value) {\n return value * value;\n }\n};\n\nstruct Sqrt {\n template \n __forceinline__ __device__ T operator()(const T& value) {\n return _Sqrt(value);\n }\n};\n\nstruct Identity {\n template \n __forceinline__ __device__ T operator()(const T& value) {\n return value;\n }\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/framework/float16.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n// specifies the auxiliary type to use for accumulation of the given type\ntemplate \nstruct AccumulationType;\ntemplate <>\nstruct AccumulationType {\n using type = float;\n};\ntemplate <>\nstruct AccumulationType {\n using type = float;\n};\ntemplate <>\nstruct AccumulationType {\n using type = double;\n};\ntemplate <>\nstruct AccumulationType {\n using type = float;\n};\n\ntemplate \nusing AccumulationType_t = typename AccumulationType::type;\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/framework/float16.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n// specifies the auxiliary type to use for accumulation of the given type\ntemplate \nstruct AccumulationType;\ntemplate <>\nstruct AccumulationType {\n using type = float;\n};\ntemplate <>\nstruct AccumulationType {\n using type = float;\n};\ntemplate <>\nstruct AccumulationType {\n using type = double;\n};\ntemplate <>\nstruct AccumulationType {\n using type = float;\n};\n\ntemplate \nusing AccumulationType_t = typename AccumulationType::type;\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n//\n// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved\n// Licensed under the MIT license. 
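Illustrative note (not part of the dataset): least_pow2_bound above uses the classic or-shift cascade to round a positive value up to the next power of two (leaving exact powers of two unchanged). The host-only check below reproduces that bit-twiddling.

// Host-only check of the least_pow2_bound bit trick (sketch only).
#include <cstdio>

int least_pow2_bound_ref(int value) {
  unsigned int v = static_cast<unsigned int>(value);
  --v;                                   // handle exact powers of two
  v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
  v |= v >> 8;  v |= v >> 16;            // smear the highest set bit downward
  return static_cast<int>(++v);          // one past an all-ones suffix = power of two
}

int main() {
  const int samples[] = {1, 2, 3, 5, 16, 17, 1000};
  for (int s : samples)
    std::printf("least_pow2_bound(%d) = %d\n", s, least_pow2_bound_ref(s));
  // Expected: 1, 2, 4, 8, 16, 32, 1024
}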
See LICENSE.md file in the project root for full license information.\n//\n\n#pragma once\n\n#include \n#include \n#include \n#include \n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\n// The code below is based on section 4 Unsigned division of paper https://gmplib.org/~tege/divcnst-pldi94.pdf\n// In current ORT, fast_divmod is used for calculating the position of a element in tensor,\n// so unsigned integer division from the paper is good enough for ORT. The advantage is that div is very simple,\n// then GPU compiler can do loop unroll easilly when divmod is called in a loop.\nstruct fast_divmod {\n fast_divmod(int d = 1) {\n d_ = d == 0 ? 1 : d;\n ORT_ENFORCE(d_ >= 1 && d_ <= static_cast(std::numeric_limits::max()));\n\n for (l_ = 0; l_ < 32; l_++)\n if ((1U << l_) >= d_) break;\n\n uint64_t one = 1;\n uint64_t m = ((one << 32) * ((one << l_) - d_)) / d_ + 1;\n M_ = static_cast(m);\n // according to paper, the value of m' should fit in a unsigned integer.\n ORT_ENFORCE(M_ > 0 && M_ == m);\n }\n\n __host__ __device__ inline int div(int n) const {\n#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)\n uint32_t t = __umulhi(M_, n);\n return (t + n) >> l_;\n#else\n // Using uint64_t for t, then t + n won't overflow.\n uint64_t t = ((uint64_t)M_ * n) >> 32;\n return static_cast((t + n) >> l_);\n#endif\n }\n\n __host__ __device__ inline int mod(int n) const {\n return n - div(n) * d_;\n }\n\n __host__ __device__ inline void divmod(int n, int& q, int& r) const {\n q = div(n);\n r = n - q * d_;\n }\n\n uint32_t d_; // divisor\n uint32_t M_; // m' in the paper.\n uint32_t l_; // l_ = ceil(log2(d_))\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " //\n// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved\n// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.\n//\n\n#pragma once\n\n#include \n#include \n#include \n#include \n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\n// The code below is based on section 4 Unsigned division of paper https://gmplib.org/~tege/divcnst-pldi94.pdf\n// In current ORT, fast_divmod is used for calculating the position of a element in tensor,\n// so unsigned integer division from the paper is good enough for ORT. The advantage is that div is very simple,\n// then GPU compiler can do loop unroll easilly when divmod is called in a loop.\nstruct fast_divmod {\n fast_divmod(int d = 1) {\n d_ = d == 0 ? 
1 : d;\n ORT_ENFORCE(d_ >= 1 && d_ <= static_cast(std::numeric_limits::max()));\n\n for (l_ = 0; l_ < 32; l_++)\n if ((1U << l_) >= d_) break;\n\n uint64_t one = 1;\n uint64_t m = ((one << 32) * ((one << l_) - d_)) / d_ + 1;\n M_ = static_cast(m);\n // according to paper, the value of m' should fit in a unsigned integer.\n ORT_ENFORCE(M_ > 0 && M_ == m);\n }\n\n __host__ __device__ inline int div(int n) const {\n#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)\n uint32_t t = __umulhi(M_, n);\n return (t + n) >> l_;\n#else\n // Using uint64_t for t, then t + n won't overflow.\n uint64_t t = ((uint64_t)M_ * n) >> 32;\n return static_cast((t + n) >> l_);\n#endif\n }\n\n __host__ __device__ inline int mod(int n) const {\n return n - div(n) * d_;\n }\n\n __host__ __device__ inline void divmod(int n, int& q, int& r) const {\n q = div(n);\n r = n - q * d_;\n }\n\n uint32_t d_; // divisor\n uint32_t M_; // m' in the paper.\n uint32_t l_; // l_ = ceil(log2(d_))\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\nStatus GemmInt8(int m,\n int n,\n int k,\n int32_t alpha_matmul,\n int32_t beta_matmul,\n const int8_t* a,\n int lda,\n const int8_t* b,\n int ldb,\n int32_t* c,\n int ldc,\n const CudaKernel* cuda_kernel,\n onnxruntime::Stream* stream);\n}\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\nStatus GemmInt8(int m,\n int n,\n int k,\n int32_t alpha_matmul,\n int32_t beta_matmul,\n const int8_t* a,\n int lda,\n const int8_t* b,\n int ldb,\n int32_t* c,\n int ldc,\n const RocmKernel* rocm_kernel,\n onnxruntime::Stream* stream);\n}\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Cast final : public CudaKernel {\n public:\n Cast(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t to;\n Status status = info.GetAttr(\"to\", &to);\n ORT_ENFORCE(status.IsOK(), \"Attribute to is not set.\");\n to_ = gsl::narrow_cast(to);\n\n int64_t saturate = info.GetAttrOrDefault(\"saturate\", int64_t{1});\n if (saturate == 0 &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FNUZ &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2 &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2FNUZ) {\n ORT_THROW(\"Attribute saturate is only used for cast to float 8 types.\");\n }\n saturate_ = saturate == 1;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ONNX_NAMESPACE::TensorProto_DataType to_;\n bool saturate_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Cast final : public RocmKernel {\n public:\n Cast(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t to;\n Status status = info.GetAttr(\"to\", &to);\n ORT_ENFORCE(status.IsOK(), \"Attribute to is not set.\");\n to_ = gsl::narrow_cast(to);\n\n int64_t saturate = info.GetAttrOrDefault(\"saturate\", int64_t{1});\n if (saturate == 0 &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FNUZ &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2 &&\n to != ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2FNUZ) {\n ORT_THROW(\"Attribute saturate is only used for cast to float 8 types.\");\n }\n saturate_ = saturate == 1;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ONNX_NAMESPACE::TensorProto_DataType to_;\n bool saturate_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Compress final : public CudaKernel {\n public:\n Compress(const OpKernelInfo& info) : CudaKernel(info) {\n has_axis_ = info.GetAttr(\"axis\", &axis_).IsOK();\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n bool has_axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Compress final : public RocmKernel {\n public:\n Compress(const OpKernelInfo& info) : RocmKernel(info) {\n has_axis_ = info.GetAttr(\"axis\", &axis_).IsOK();\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n bool has_axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ncudaError_t CompressCalcPrefixSumTempStorageBytes(cudaStream_t stream, const int8_t* condition_data,\n int32_t* condition_cumulative_sum, int length, size_t& temp_storage_bytes);\ncudaError_t CompressInclusivePrefixSum(cudaStream_t stream, void* d_temp_storage, size_t temp_storage_bytes,\n const int8_t* condition_data, int32_t* condition_cumulative_sum, int length);\n\nStatus CompressImpl(cudaStream_t stream,\n const size_t element_bytes,\n const int32_t valid_condition_length,\n const int32_t axis_right_stride,\n const int32_t input_axis_dim_length,\n const int32_t output_axis_dim_length,\n const int32_t* condition_cumulative_sum,\n const bool* condition_data,\n const void* input_data,\n void* output_data,\n const size_t N);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nhipError_t CompressCalcPrefixSumTempStorageBytes(hipStream_t stream, const int8_t* condition_data,\n int32_t* condition_cumulative_sum, int length, size_t& temp_storage_bytes);\nhipError_t CompressInclusivePrefixSum(hipStream_t stream, void* d_temp_storage, size_t temp_storage_bytes,\n const int8_t* condition_data, int32_t* condition_cumulative_sum, int length);\n\nStatus CompressImpl(hipStream_t stream,\n const size_t element_bytes,\n const int32_t valid_condition_length,\n const int32_t axis_right_stride,\n const int32_t input_axis_dim_length,\n const int32_t output_axis_dim_length,\n const int32_t* condition_cumulative_sum,\n const bool* condition_data,\n const void* input_data,\n void* output_data,\n const size_t N);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/concatbase.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Concat final : public CudaKernel, public ConcatBase {\n public:\n Concat(const OpKernelInfo& info) : CudaKernel(info), ConcatBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/concatbase.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Concat final : public RocmKernel, public ConcatBase {\n public:\n Concat(const OpKernelInfo& info) : RocmKernel(info), ConcatBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus ConcatSameConcatDimImpl(cudaStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t concat_size, void* output_data,\n const InputDataArray input_data, const size_t output_size);\n\nStatus ConcatImpl(cudaStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t* concat_sizes, const int64_t* concat_sizes_range,\n const int64_t* axis_dimension_input_output_mapping, void* output_data, const void** input_data,\n const size_t output_size);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus ConcatSameConcatDimImpl(hipStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t concat_size, void* output_data,\n const InputDataArray input_data, const size_t output_size);\n\nStatus ConcatImpl(hipStream_t stream, const size_t element_bytes, const int block_size_including_axis_dim,\n const int block_size_inside_axis_dim, const int64_t* concat_sizes, const int64_t* concat_sizes_range,\n const int64_t* axis_dimension_input_output_mapping, void* output_data, const void** input_data,\n const size_t output_size);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Expand final : public CudaKernel {\n public:\n Expand(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nStatus ComputeOutputShape(\n const std::string& node_name,\n const TensorShape& lhs_shape,\n const TensorShape& rhs_shape,\n TensorShape& out_shape);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Expand final : public RocmKernel {\n public:\n Expand(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\nStatus ComputeOutputShape(\n const std::string& node_name,\n const TensorShape& lhs_shape,\n const TensorShape& rhs_shape,\n TensorShape& out_shape);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\n// Throw if \"ptr\" is not allocated on the CUDA device obtained by cudaGetDevice.\nvoid CheckIfMemoryOnCurrentGpuDevice(const void* ptr);\n} // namespace onnxruntime\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\nnamespace onnxruntime {\n// Throw if \"ptr\" is not allocated on the ROCM device obtained by hipGetDevice.\nvoid CheckIfMemoryOnCurrentGpuDevice(const void* ptr);\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus ExpandImpl(\n cudaStream_t stream,\n const size_t element_size,\n const int N_output,\n const int N_input,\n const void* input_data,\n void* output_data,\n const TArray& output_strides,\n const TArray& input_strides);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus ExpandImpl(\n hipStream_t stream,\n const size_t element_size,\n const int N_output,\n const int N_input,\n const void* input_data,\n void* output_data,\n const TArray& output_strides,\n const TArray& input_strides);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass EyeLike final : public CudaKernel {\n public:\n EyeLike(const OpKernelInfo& info) : CudaKernel(info) {\n if (!info.GetAttr(\"k\", &k_).IsOK()) {\n k_ = 0;\n }\n\n has_dtype_ = info.GetAttr(\"dtype\", &dtype_).IsOK();\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool has_dtype_;\n int64_t dtype_;\n int64_t k_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass EyeLike final : public RocmKernel {\n public:\n EyeLike(const OpKernelInfo& info) : RocmKernel(info) {\n if (!info.GetAttr(\"k\", &k_).IsOK()) {\n k_ = 0;\n }\n\n has_dtype_ = info.GetAttr(\"dtype\", &dtype_).IsOK();\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool has_dtype_;\n int64_t dtype_;\n int64_t k_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/providers/cuda/cu_inc/common.cuh\"\n#include \"eye_like_impl.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \n__global__ void _EyeLikeKernel(\n size_t offset,\n size_t stripe,\n T* output_data,\n CUDA_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n // offset is the first elements, stripe is width + 1.\n output_data[offset + id * stripe] = static_cast(1);\n}\n\ntemplate \nvoid EyeLikeImpl(\n cudaStream_t stream,\n size_t offset,\n size_t stripe,\n T* output_data,\n size_t diag_count) {\n constexpr int block_size = 256;\n int blocksPerGrid = (int)(ceil(static_cast(diag_count) / block_size));\n CUDA_LONG N = static_cast(diag_count);\n\n _EyeLikeKernel<<>>(offset, stripe, output_data, N);\n}\n\n#define SPECIALIZED_IMPL(T) \\\n template void EyeLikeImpl( \\\n cudaStream_t stream, \\\n size_t offset, \\\n size_t stripe, \\\n T* output_data, \\\n size_t diag_count);\n\nSPECIALIZED_IMPL(int32_t)\nSPECIALIZED_IMPL(int64_t)\nSPECIALIZED_IMPL(uint64_t)\nSPECIALIZED_IMPL(float)\nSPECIALIZED_IMPL(double)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n###", "hip": " #include \"hip/hip_runtime.h\"\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/providers/rocm/cu_inc/common.cuh\"\n#include \"eye_like_impl.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \n__global__ void _EyeLikeKernel(\n size_t offset,\n size_t stripe,\n T* output_data,\n HIP_LONG N) {\n CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);\n\n // offset is the first elements, stripe is width + 1.\n output_data[offset + id * stripe] = static_cast(1);\n}\n\ntemplate \nvoid EyeLikeImpl(\n hipStream_t stream,\n size_t offset,\n size_t stripe,\n T* output_data,\n size_t diag_count) {\n constexpr int block_size = 256;\n int blocksPerGrid = (int)(ceil(static_cast(diag_count) / block_size));\n HIP_LONG N = static_cast(diag_count);\n\n _EyeLikeKernel<<>>(offset, stripe, output_data, N);\n}\n\n#define SPECIALIZED_IMPL(T) \\\n template void EyeLikeImpl( \\\n hipStream_t stream, \\\n size_t offset, \\\n size_t stripe, \\\n T* output_data, \\\n size_t diag_count);\n\nSPECIALIZED_IMPL(int32_t)\nSPECIALIZED_IMPL(int64_t)\nSPECIALIZED_IMPL(uint64_t)\nSPECIALIZED_IMPL(float)\nSPECIALIZED_IMPL(double)\n\n} // namespace rocm\n} // namespace onnxruntime###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/providers/cuda/shared_inc/fast_divmod.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid EyeLikeImpl(\n cudaStream_t stream,\n size_t offset, // offset of first element in diagnal\n size_t stripe, // stripe, here it's width + 1\n T* output_data, // output buffer\n size_t diag_count // total number of elements in diagnal\n);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/providers/rocm/shared_inc/fast_divmod.h\"\n#include \"core/common/common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid EyeLikeImpl(\n hipStream_t stream,\n size_t offset, // offset of first element in diagnal\n size_t stripe, // stripe, here it's width + 1\n T* output_data, // output buffer\n size_t diag_count // total number of elements in diagnal\n);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"flatten.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 1, 8,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 9, 10,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\n// explicitly support negative axis\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 11, 12,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\nONNX_OPERATOR_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 13,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\nStatus Flatten::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* X = ctx->Input(0);\n const TensorShape& X_shape = X->Shape();\n\n auto axis = axis_;\n // Valid axis range is [-rank, rank] instead of [-rank, rank-1], add additional check to only handle neg axis case.\n if (axis < 0) {\n axis = HandleNegativeAxis(axis, X_shape.NumDimensions()); // handle negative and enforce axis is valid\n }\n\n ORT_ENFORCE(gsl::narrow_cast(X_shape.NumDimensions()) >= axis, \"The rank of input tensor must be >= axis\");\n\n Tensor* Y = ctx->Output(0, {X_shape.SizeToDimension(axis), X_shape.SizeFromDimension(axis)});\n // If source and target pointers are not equal (non-inplace operation), we need to copy the data.\n const void* source = X->DataRaw();\n void* target = Y->MutableDataRaw();\n if (target != source) {\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, X_shape.Size() * X->DataType()->Size(),\n cudaMemcpyDeviceToDevice, Stream(ctx)));\n }\n\n return Status::OK();\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"flatten.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 1, 8,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 9, 10,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\n// explicitly support negative axis\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 11, 12,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\nONNX_OPERATOR_KERNEL_EX(\n Flatten,\n kOnnxDomain,\n 13,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Flatten);\n\nStatus Flatten::ComputeInternal(OpKernelContext* ctx) const {\n const Tensor* X = ctx->Input(0);\n const TensorShape& X_shape = X->Shape();\n\n auto axis = axis_;\n // Valid axis range is [-rank, rank] instead of [-rank, rank-1], add additional check to only handle neg axis case.\n if (axis < 0) {\n axis = HandleNegativeAxis(axis, X_shape.NumDimensions()); // handle negative and enforce axis is valid\n }\n\n ORT_ENFORCE(gsl::narrow_cast(X_shape.NumDimensions()) >= axis, \"The rank of input tensor must be >= axis\");\n\n Tensor* Y = ctx->Output(0, {X_shape.SizeToDimension(axis), X_shape.SizeFromDimension(axis)});\n // If source and target pointers are not equal (non-inplace operation), we need to copy the data.\n const void* source = X->DataRaw();\n void* target = Y->MutableDataRaw();\n if (target != source) {\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, X_shape.Size() * X->DataType()->Size(),\n hipMemcpyDeviceToDevice, Stream(ctx)));\n }\n\n return Status::OK();\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Flatten final : public CudaKernel {\n public:\n Flatten(const OpKernelInfo& info) : CudaKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Flatten final : public RocmKernel {\n public:\n Flatten(const OpKernelInfo& info) : RocmKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK());\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#include \"core/providers/cuda/tensor/gather_impl.h\"\n#include \"core/providers/cuda/tensor/gather.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\nnamespace onnxruntime {\nnamespace cuda {\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Gather, kOnnxDomain, 1, 10, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}), Gather);\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Gather, kOnnxDomain, 11, 12, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}), Gather);\n\nONNX_OPERATOR_KERNEL_EX(\n Gather, kOnnxDomain, 13, kCudaExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}), Gather);\nStatus Gather::ComputeInternal(OpKernelContext* context) const {\n Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));\n const TensorShape& input_shape = p.input_tensor->Shape();\n const int64_t block_size = input_shape.SizeFromDimension(p.axis + 1);\n size_t N = p.indices_tensor->Shape().Size();\n const int64_t input_block_size = input_shape.SizeFromDimension(p.axis);\n const int64_t output_block_size = N * block_size;\n const int64_t indices_max = input_shape[p.axis];\n const void* input_data = p.input_tensor->DataRaw();\n const void* indices_data = p.indices_tensor->DataRaw();\n void* output_data = p.output_tensor->MutableDataRaw();\n if (p.output_tensor->Shape().Size() == 0) {\n return Status::OK();\n }\n const fast_divmod divmod_output_block_size(gsl::narrow_cast(output_block_size));\n const fast_divmod divmod_block_size(gsl::narrow_cast(block_size));\n const size_t element_size = p.input_tensor->DataType()->Size();\n const size_t index_element_size = p.indices_tensor->DataType()->Size();\n \n \n \n if (p.indices_tensor->IsDataType() ||\n p.indices_tensor->IsDataType()) {\n GatherImpl(\n Stream(context), input_block_size, indices_max, divmod_output_block_size, divmod_block_size, indices_data, index_element_size, input_data, element_size, output_data, p.output_tensor->Shape().Size());\n return Status::OK();\n }\n return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, \"Type for Tind not supported yet in Gather.\");\n}\n} \n} \n\n###", "hip": " \n\n#include \"core/providers/rocm/tensor/gather_impl.h\"\n#include \"core/providers/rocm/tensor/gather.h\"\n#include \"core/providers/cpu/tensor/utils.h\"\nnamespace onnxruntime {\nnamespace rocm {\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Gather, kOnnxDomain, 1, 10, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", 
std::vector{\n DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}), Gather);\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Gather, kOnnxDomain, 11, 12, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}), Gather);\n\nONNX_OPERATOR_KERNEL_EX(\n Gather, kOnnxDomain, 13, kRocmExecutionProvider, (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"Tind\", std::vector{\n DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType()}), Gather);\nStatus Gather::ComputeInternal(OpKernelContext* context) const {\n Prepare p;\n ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));\n const TensorShape& input_shape = p.input_tensor->Shape();\n const int64_t block_size = input_shape.SizeFromDimension(p.axis + 1);\n size_t N = p.indices_tensor->Shape().Size();\n const int64_t input_block_size = input_shape.SizeFromDimension(p.axis);\n const int64_t output_block_size = N * block_size;\n const int64_t indices_max = input_shape[p.axis];\n const void* input_data = p.input_tensor->DataRaw();\n const void* indices_data = p.indices_tensor->DataRaw();\n void* output_data = p.output_tensor->MutableDataRaw();\n if (p.output_tensor->Shape().Size() == 0) {\n return Status::OK();\n }\n const fast_divmod divmod_output_block_size(gsl::narrow_cast(output_block_size));\n const fast_divmod divmod_block_size(gsl::narrow_cast(block_size));\n const size_t element_size = p.input_tensor->DataType()->Size();\n const size_t index_element_size = p.indices_tensor->DataType()->Size();\n \n \n \n if (p.indices_tensor->IsDataType() ||\n p.indices_tensor->IsDataType()) {\n GatherImpl(\n Stream(context), input_block_size, indices_max, divmod_output_block_size, divmod_block_size, indices_data, index_element_size, input_data, element_size, output_data, p.output_tensor->Shape().Size());\n return Status::OK();\n }\n return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED, \"Type for Tind not supported yet in Gather.\");\n}\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/gatherbase.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Gather : public CudaKernel, public GatherBase {\n public:\n Gather(const OpKernelInfo& info) : CudaKernel(info), GatherBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/gatherbase.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Gather : public RocmKernel, public GatherBase {\n public:\n Gather(const OpKernelInfo& info) : RocmKernel(info), GatherBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/shared_library/provider_api.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct GatherScatterElementsArgs;\n\n// Coalesce those contiguous axes that have same dim values for both input and indices (exclude the gather/scatter axis)\n// so that we will have less divmod to compute during the kernels.\n// For example:\n// shape(input)=[2,2,2], shape(indices)=[2,2,3], axis=2 is same as shape(input)=[4,2], shape(indices)=[4,3], axis=1\n// shape(input)=[2,1,2,2,3,2,2], shape(indices)=[2,1,2,2,2,2,2], axis=3) is same as\n// shape(input)=[4,2,3,4],shape(indices)=[4,2,2,4], axis=1\n// If indices is strided, dim i (outer) and dim j is contiguous when strides[i] = shape[j] * strides[j].\n// For example:\n// shape(indices)=[2,3,4,5], strides(indices)=[0,20,5,1], then dim-2 and dim-3 is contiguous (5==5*1),\n// dim-1 and dim-2 is contiguous (20==4*5), but dim-0 and dim-1 is not contiguous (0!=3*20).\nvoid CoalesceDimensions(TensorShapeVector& input_shape, TensorShapeVector& indices_shape,\n TensorShapeVector* p_indices_strides, int64_t axis, GatherScatterElementsArgs& args);\nONNX_NAMESPACE::TensorProto_DataType GetElementType(size_t element_size);\n\nclass GatherElements final : public CudaKernel {\n public:\n GatherElements(const OpKernelInfo& info) : CudaKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK(), \"Missing/Invalid 'axis' attribute value\");\n }\n ~GatherElements() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n\n int64_t axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n#pragma once\n\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/shared_library/provider_api.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct GatherScatterElementsArgs;\n\n// Coalesce those contiguous axes that have same dim values for both input and indices (exclude the gather/scatter axis)\n// so that we will have less divmod to compute during the kernels.\n// For example:\n// shape(input)=[2,2,2], shape(indices)=[2,2,3], axis=2 is same as shape(input)=[4,2], shape(indices)=[4,3], axis=1\n// shape(input)=[2,1,2,2,3,2,2], shape(indices)=[2,1,2,2,2,2,2], axis=3) is same as\n// shape(input)=[4,2,3,4],shape(indices)=[4,2,2,4], axis=1\n// If indices is strided, dim i (outer) and dim j is contiguous when strides[i] = shape[j] * strides[j].\n// For example:\n// shape(indices)=[2,3,4,5], strides(indices)=[0,20,5,1], then dim-2 and dim-3 is contiguous (5==5*1),\n// dim-1 and dim-2 is contiguous (20==4*5), but dim-0 and dim-1 is not contiguous (0!=3*20).\nvoid CoalesceDimensions(TensorShapeVector& input_shape, TensorShapeVector& indices_shape,\n TensorShapeVector* p_indices_strides, int64_t axis, GatherScatterElementsArgs& args);\nONNX_NAMESPACE::TensorProto_DataType GetElementType(size_t element_size);\n\nclass GatherElements final : public RocmKernel {\n public:\n GatherElements(const OpKernelInfo& info) : RocmKernel(info) {\n ORT_ENFORCE(info.GetAttr(\"axis\", &axis_).IsOK(), \"Missing/Invalid 'axis' attribute value\");\n }\n ~GatherElements() = default;\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n template \n struct ComputeImpl;\n\n int64_t axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nstruct GatherScatterElementsArgs {\n int64_t rank;\n int64_t axis;\n int64_t input_size;\n int64_t input_dim_along_axis;\n int64_t input_stride_along_axis;\n TArray masked_input_strides;\n TArray indices_fdms;\n TArray indices_strides;\n int64_t indices_size;\n};\n\ntemplate \nvoid GatherElementsImpl(cudaStream_t stream, const T* input_data, const TIndex* indices_data, T* output_data,\n const GatherScatterElementsArgs& args);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nstruct GatherScatterElementsArgs {\n int64_t rank;\n int64_t axis;\n int64_t input_size;\n int64_t input_dim_along_axis;\n int64_t input_stride_along_axis;\n TArray masked_input_strides;\n TArray indices_fdms;\n TArray indices_strides;\n int64_t indices_size;\n};\n\ntemplate \nvoid GatherElementsImpl(hipStream_t stream, const T* input_data, const TIndex* indices_data, T* output_data,\n const GatherScatterElementsArgs& args);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/cuda/cuda_graph.h\"\n\n#include \"core/providers/cuda/cuda_common.h\"\n#include \n#include \n\nnamespace onnxruntime {\n\nCUDAGraph::CUDAGraph(cudaStream_t stream) : stream_(stream) {\n}\n\nvoid CUDAGraph::SetStream(cudaStream_t stream) {\n stream_ = stream;\n}\n\nvoid CUDAGraph::CaptureBegin() {\n ORT_ENFORCE(!has_graph_exec_,\n \"This cuda graph has already captured a graph. \"\n \"Create a new instance to capture a new graph.\");\n\n CUDA_CALL_THROW(cudaStreamSynchronize(stream_));\n // For now cuda graph can only work with a single thread. In the future, we\n // will support multiple threads. For multiple threads with multiple graphs\n // and streams, `cudaStreamCaptureModeGlobal` needs to be changed to\n // `cudaStreamCaptureModeThreadLocal`\n CUDA_CALL_THROW(cudaStreamBeginCapture(stream_, cudaStreamCaptureModeGlobal));\n}\n\nvoid CUDAGraph::CaptureEnd() {\n CUDA_CALL_THROW(cudaStreamEndCapture(stream_, &graph_));\n if (graph_ == NULL) {\n ORT_THROW(\"CUDAGraph::CaptureEnd: graph_ is NULL\");\n }\n\n has_graph_ = true;\n CUDA_CALL_THROW(cudaGraphInstantiate(&graph_exec_, graph_, NULL, NULL, 0));\n has_graph_exec_ = true;\n CUDA_CALL_THROW(cudaGraphDestroy(graph_));\n has_graph_ = false;\n}\n\nStatus CUDAGraph::Replay() {\n // Although this function is not thread safe, the lock is not needed here because\n // CUDA EP maintains a separate cuda graph per thread\n LOGS_DEFAULT(INFO) << \"Replaying CUDA graph on stream \" << stream_;\n CUDA_RETURN_IF_ERROR(cudaGraphLaunch(graph_exec_, stream_));\n CUDA_RETURN_IF_ERROR(cudaStreamSynchronize(stream_));\n return Status::OK();\n}\n\nvoid CUDAGraph::Reset() {\n if (has_graph_) {\n CUDA_CALL_THROW(cudaGraphDestroy(graph_));\n has_graph_ = false;\n }\n if (has_graph_exec_) {\n CUDA_CALL_THROW(cudaGraphExecDestroy(graph_exec_));\n has_graph_exec_ = false;\n }\n}\n\nCUDAGraph::~CUDAGraph() {\n Reset();\n}\n\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/rocm/rocm_graph.h\"\n\n#include \"core/providers/rocm/rocm_common.h\"\n#include \n#include \n\nnamespace onnxruntime {\n\nROCMGraph::ROCMGraph(hipStream_t stream) : stream_(stream) {\n}\n\nvoid ROCMGraph::SetStream(hipStream_t stream) {\n stream_ = stream;\n}\n\nvoid ROCMGraph::CaptureBegin() {\n ORT_ENFORCE(!has_graph_exec_,\n \"This rocm graph has already captured a graph. \"\n \"Create a new instance to capture a new graph.\");\n\n HIP_CALL_THROW(hipStreamSynchronize(stream_));\n // For now rocm graph can only work with a single thread. In the future, we\n // will support multiple threads. 
For multiple threads with multiple graphs\n // and streams, `hipStreamCaptureModeGlobal` needs to be changed to\n // `hipStreamCaptureModeThreadLocal`\n HIP_CALL_THROW(hipStreamBeginCapture(stream_, hipStreamCaptureModeGlobal));\n}\n\nvoid ROCMGraph::CaptureEnd() {\n HIP_CALL_THROW(hipStreamEndCapture(stream_, &graph_));\n if (graph_ == NULL) {\n ORT_THROW(\"ROCMGraph::CaptureEnd: graph_ is NULL\");\n }\n\n has_graph_ = true;\n HIP_CALL_THROW(hipGraphInstantiate(&graph_exec_, graph_, NULL, NULL, 0));\n has_graph_exec_ = true;\n HIP_CALL_THROW(hipGraphDestroy(graph_));\n has_graph_ = false;\n}\n\nStatus ROCMGraph::Replay() {\n // Although this function is not thread safe, the lock is not needed here because\n // ROCM EP maintains a separate rocm graph per thread\n LOGS_DEFAULT(INFO) << \"Replaying ROCM graph on stream \" << stream_;\n HIP_RETURN_IF_ERROR(hipGraphLaunch(graph_exec_, stream_));\n HIP_RETURN_IF_ERROR(hipStreamSynchronize(stream_));\n return Status::OK();\n}\n\nvoid ROCMGraph::Reset() {\n if (has_graph_) {\n HIP_CALL_THROW(hipGraphDestroy(graph_));\n has_graph_ = false;\n }\n if (has_graph_exec_) {\n HIP_CALL_THROW(hipGraphExecDestroy(graph_exec_));\n has_graph_exec_ = false;\n }\n}\n\nROCMGraph::~ROCMGraph() {\n Reset();\n}\n\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nvoid GatherImpl(\n cudaStream_t stream,\n const int64_t input_block_size,\n const int64_t indices_max,\n const fast_divmod& output_block_size,\n const fast_divmod& block_size,\n const void* indices_data,\n size_t index_element_size,\n const void* input_data,\n size_t element_size,\n void* output_data,\n const size_t N);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nvoid GatherImpl(\n hipStream_t stream,\n const int64_t input_block_size,\n const int64_t indices_max,\n const fast_divmod& output_block_size,\n const fast_divmod& block_size,\n const void* indices_data,\n size_t index_element_size,\n const void* input_data,\n size_t element_size,\n void* output_data,\n const size_t N);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nStatus CheckBatchDimensionsMatch(\n size_t num_batch_dimensions,\n const std::vector>& tensor_shapes);\n\nclass GatherNDBase : public CudaKernel {\n public:\n GatherNDBase(const OpKernelInfo& info) : CudaKernel(info) {\n info.GetAttrOrDefault(\"batch_dims\", &batch_dims_, static_cast(0));\n ORT_ENFORCE(batch_dims_ >= 0);\n }\n\n protected:\n template \n Status PrepareCompute(\n onnxruntime::Stream* stream,\n const int64_t batch_dims,\n const TensorShape& input_shape,\n const TensorShape& indices_shape,\n const Tensor* indices_tensor,\n int64_t& num_slices,\n int64_t& slice_size,\n IAllocatorUniquePtr& input_slice_offsets_buffer) const;\n\n int64_t batch_dims_;\n};\n\ntemplate \nclass GatherND final : public GatherNDBase {\n public:\n GatherND(const OpKernelInfo& info) : GatherNDBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nStatus CheckBatchDimensionsMatch(\n size_t num_batch_dimensions,\n const std::vector>& tensor_shapes);\n\nclass GatherNDBase : public RocmKernel {\n public:\n GatherNDBase(const OpKernelInfo& info) : RocmKernel(info) {\n info.GetAttrOrDefault(\"batch_dims\", &batch_dims_, static_cast(0));\n ORT_ENFORCE(batch_dims_ >= 0);\n }\n\n protected:\n template \n Status PrepareCompute(\n onnxruntime::Stream* stream,\n const int64_t batch_dims,\n const TensorShape& input_shape,\n const TensorShape& indices_shape,\n const Tensor* indices_tensor,\n int64_t& num_slices,\n int64_t& slice_size,\n IAllocatorUniquePtr& input_slice_offsets_buffer) const;\n\n int64_t batch_dims_;\n};\n\ntemplate \nclass GatherND final : public GatherNDBase {\n public:\n GatherND(const OpKernelInfo& info) : GatherNDBase(info) {}\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid ComputeSliceOffsetsImpl(\n cudaStream_t stream,\n const int64_t batch_dims,\n const TArray input_dims,\n const size_t num_slices,\n const size_t num_slices_per_batch,\n const size_t input_batch_stride,\n const size_t num_slice_dims,\n const int64_t* const sizes_from_slice_dims_data, // num_slice_dims elements\n const TIndex* const indices_data, // num_slices * num_slice_dims elements\n int64_t* const input_slice_offsets_data); // num_slices elements\n\ntemplate \nvoid GatherNDImpl(\n cudaStream_t stream,\n const size_t num_slices,\n const void* input_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data);\n\n#ifdef ENABLE_TRAINING_OPS\ntemplate \nvoid GatherNDGradImpl(\n cudaStream_t stream,\n const size_t num_slices,\n const void* update_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data);\n#endif\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid ComputeSliceOffsetsImpl(\n hipStream_t stream,\n const int64_t batch_dims,\n const TArray input_dims,\n const size_t num_slices,\n const size_t num_slices_per_batch,\n const size_t input_batch_stride,\n const size_t num_slice_dims,\n const int64_t* const sizes_from_slice_dims_data, // num_slice_dims elements\n const TIndex* const indices_data, // num_slices * num_slice_dims elements\n int64_t* const input_slice_offsets_data); // num_slices elements\n\ntemplate \nvoid GatherNDImpl(\n hipStream_t stream,\n const size_t num_slices,\n const void* input_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data);\n\n#ifdef ENABLE_TRAINING_OPS\ntemplate \nvoid GatherNDGradImpl(\n hipStream_t stream,\n const size_t num_slices,\n const void* update_data,\n void* output_data,\n const size_t slice_size,\n const int64_t* input_slice_offsets_data);\n#endif\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"identity_op.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Dropout,\n kOnnxDomain,\n 7, 9,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", {DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()})\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Dropout,\n kOnnxDomain,\n 10,\n 11,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", {DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()})\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 1, 12,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 13, 13,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 14, 18,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"V\", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypes())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 19,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"V\", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypesIRv9())\n .Alias(0, 0),\n IdentityOp);\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"identity_op.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Dropout,\n kOnnxDomain,\n 7, 9,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", {DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()})\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Dropout,\n kOnnxDomain,\n 10,\n 11,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", {DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType(),\n DataTypeImpl::GetTensorType()})\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 1, 12,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 13, 13,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 14, 18,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"V\", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypes())\n .Alias(0, 0),\n IdentityOp);\n\nONNX_OPERATOR_KERNEL_EX(\n Identity,\n kOnnxDomain,\n 19,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"V\", DataTypeImpl::AllFixedSizeTensorAndSequenceTensorTypesIRv9())\n .Alias(0, 0),\n IdentityOp);\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n\n\n#pragma once\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\nnamespace onnxruntime {\nnamespace cuda {\ntemplate \nclass IdentityOp final : public CudaKernel {\n public:\n IdentityOp(const OpKernelInfo& info) : CudaKernel(info) {\n }\n Status ComputeInternal(OpKernelContext* context) const override {\n auto X_ml_type = context->InputType(0);\n if (X_ml_type->IsTensorType()) {\n const Tensor* X = context->Input(0);\n if (nullptr == X) {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp cuda: input count mismatch.\");\n }\n const TensorShape& shape = X->Shape();\n Tensor* Y = context->Output(0, shape);\n if (nullptr == Y) {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp cuda: failed to allocate output tensor.\");\n }\n auto X_type = X->DataType();\n const void* source = X->DataRaw(X_type);\n void* target = Y->MutableDataRaw(X_type);\n \n if (target != source) {\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, X->Shape().Size() * X->DataType()->Size(), cudaMemcpyDeviceToDevice, Stream(context)));\n }\n if (is_dropout) {\n Tensor* mask = context->Output(1, shape);\n \n if (mask != nullptr) {\n \n \n \n \n void* mask_data = mask->MutableDataRaw();\n \n \n CUDA_RETURN_IF_ERROR(cudaMemsetAsync(mask_data, 0, mask->SizeInBytes(), Stream(context)));\n }\n }\n } else if (X_ml_type->IsTensorSequenceType()) {\n const TensorSeq* X = context->Input(0);\n ORT_ENFORCE(X != nullptr, \"IdentityOp cuda: input tensor is missing.\");\n TensorSeq* Y = context->Output(0);\n ORT_ENFORCE(Y != nullptr, \"IdentityOp cuda: failed to allocate output tensor sequence.\");\n if (X == Y) {\n return Status::OK();\n }\n auto X_type = X->DataType();\n Y->SetType(X_type);\n AllocatorPtr 
alloc;\n auto status = context->GetTempSpaceAllocator(&alloc);\n if (!status.IsOK()) {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp cuda: unable to get an allocator.\");\n }\n auto X_size = X->Size();\n Y->Reserve(X_size);\n for (size_t i = 0; i < X_size; ++i) {\n const Tensor& source_tensor = X->Get(i);\n std::unique_ptr target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);\n CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target_tensor->MutableDataRaw(), source_tensor.DataRaw(), source_tensor.SizeInBytes(), cudaMemcpyDeviceToDevice, Stream(context)));\n Y->Add(std::move(*target_tensor));\n }\n } else {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp cuda: unsupported input type.\");\n }\n return Status::OK();\n }\n};\n} \n} \n\n###", "hip": " \n\n#pragma once\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\nnamespace onnxruntime {\nnamespace rocm {\ntemplate \nclass IdentityOp final : public RocmKernel {\n public:\n IdentityOp(const OpKernelInfo& info) : RocmKernel(info) {\n }\n Status ComputeInternal(OpKernelContext* context) const override {\n auto X_ml_type = context->InputType(0);\n if (X_ml_type->IsTensorType()) {\n const Tensor* X = context->Input(0);\n if (nullptr == X) {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp rocm: input count mismatch.\");\n }\n const TensorShape& shape = X->Shape();\n Tensor* Y = context->Output(0, shape);\n if (nullptr == Y) {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp rocm: failed to allocate output tensor.\");\n }\n auto X_type = X->DataType();\n const void* source = X->DataRaw(X_type);\n void* target = Y->MutableDataRaw(X_type);\n \n if (target != source) {\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, X->Shape().Size() * X->DataType()->Size(), hipMemcpyDeviceToDevice, Stream(context)));\n }\n if (is_dropout) {\n Tensor* mask = context->Output(1, shape);\n \n if (mask != nullptr) {\n \n \n \n \n void* mask_data = mask->MutableDataRaw();\n \n \n HIP_RETURN_IF_ERROR(hipMemsetAsync(mask_data, 0, mask->SizeInBytes(), Stream(context)));\n }\n }\n } else if (X_ml_type->IsTensorSequenceType()) {\n const TensorSeq* X = context->Input(0);\n ORT_ENFORCE(X != nullptr, \"IdentityOp rocm: input tensor is missing.\");\n TensorSeq* Y = context->Output(0);\n ORT_ENFORCE(Y != nullptr, \"IdentityOp rocm: failed to allocate output tensor sequence.\");\n if (X == Y) {\n return Status::OK();\n }\n auto X_type = X->DataType();\n Y->SetType(X_type);\n AllocatorPtr alloc;\n auto status = context->GetTempSpaceAllocator(&alloc);\n if (!status.IsOK()) {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp rocm: unable to get an allocator.\");\n }\n auto X_size = X->Size();\n Y->Reserve(X_size);\n for (size_t i = 0; i < X_size; ++i) {\n const Tensor& source_tensor = X->Get(i);\n std::unique_ptr target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);\n HIP_RETURN_IF_ERROR(hipMemcpyAsync(target_tensor->MutableDataRaw(), source_tensor.DataRaw(), source_tensor.SizeInBytes(), hipMemcpyDeviceToDevice, Stream(context)));\n Y->Add(std::move(*target_tensor));\n }\n } else {\n return Status(common::ONNXRUNTIME, common::FAIL, \"IdentityOp rocm: unsupported input type.\");\n }\n return Status::OK();\n }\n};\n} \n} ###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nint NonZeroCalcBlockCount(int64_t x_size);\n\ncudaError_t NonZeroCalcPrefixSumTempStorageBytes(cudaStream_t stream, int* prefix_counts, int number_of_blocks, size_t&);\n\ncudaError_t NonZeroInclusivePrefixSum(cudaStream_t stream, void* d_temp_storage, size_t temp_storage_bytes, int* prefix_counts, int number_of_blocks);\n\n// count nonzero elements in each block into counts_in_blocks,\n// the counts_in_blocks buffer is pre-allocated on gpu first.\ntemplate \ncudaError_t NonZeroCountEachBlock(cudaStream_t stream, const InputT* x, int64_t x_size, int* counts_in_blocks);\n\n// output nonzero positions using input x and prefix_counts for each blocks\ntemplate \ncudaError_t NonZeroOutputPositions(\n cudaStream_t stream, const InputT* x, int64_t x_size, int x_rank, const TArray& x_strides,\n const int* prefix_counts, int nonzero_elements, int64_t* results);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nint NonZeroCalcBlockCount(int64_t x_size);\n\nhipError_t NonZeroCalcPrefixSumTempStorageBytes(hipStream_t stream, int* prefix_counts, int number_of_blocks, size_t&);\n\nhipError_t NonZeroInclusivePrefixSum(hipStream_t stream, void* d_temp_storage, size_t temp_storage_bytes, int* prefix_counts, int number_of_blocks);\n\n// count nonzero elements in each block into counts_in_blocks,\n// the counts_in_blocks buffer is pre-allocated on gpu first.\ntemplate \nhipError_t NonZeroCountEachBlock(hipStream_t stream, const InputT* x, int64_t x_size, int* counts_in_blocks);\n\n// output nonzero positions using input x and prefix_counts for each blocks\ntemplate \nhipError_t NonZeroOutputPositions(\n hipStream_t stream, const InputT* x, int64_t x_size, int x_rank, const TArray& x_strides,\n const int* prefix_counts, int nonzero_elements, int64_t* results);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass NonZero final : public CudaKernel {\n public:\n NonZero(const OpKernelInfo& info) : CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass NonZero final : public RocmKernel {\n public:\n NonZero(const OpKernelInfo& info) : RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid OneHotImpl(\n cudaStream_t stream,\n const in_type* indices,\n const fast_divmod fdm_depth_suffix,\n const fast_divmod fdm_suffix,\n const int64_t depth_val,\n const out_type on_value,\n const out_type off_value,\n out_type* output,\n size_t count);\n\ntemplate \nvoid OneHotWithZeroOffValueImpl(\n cudaStream_t stream,\n const in_type* indices,\n const fast_divmod fdm_suffix,\n const int64_t depth_val,\n const out_type on_value,\n out_type* output,\n size_t count);\n\ntemplate \nclass OneHotOp final : public CudaKernel {\n public:\n explicit OneHotOp(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t tmp_axis;\n if (info.GetAttr(\"axis\", &tmp_axis).IsOK()) {\n axis_ = tmp_axis;\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OneHotOp);\n\n int64_t axis_ = -1;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid OneHotImpl(\n hipStream_t stream,\n const in_type* indices,\n const fast_divmod fdm_depth_suffix,\n const fast_divmod fdm_suffix,\n const int64_t depth_val,\n const out_type on_value,\n const out_type off_value,\n out_type* output,\n size_t count);\n\ntemplate \nvoid OneHotWithZeroOffValueImpl(\n hipStream_t stream,\n const in_type* indices,\n const fast_divmod fdm_suffix,\n const int64_t depth_val,\n const out_type on_value,\n out_type* output,\n size_t count);\n\ntemplate \nclass OneHotOp final : public RocmKernel {\n public:\n explicit OneHotOp(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t tmp_axis;\n if (info.GetAttr(\"axis\", &tmp_axis).IsOK()) {\n axis_ = tmp_axis;\n }\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OneHotOp);\n\n int64_t axis_ = -1;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/padbase.h\"\n\nusing onnxruntime::PadBase;\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Pad final : public PadBase, public CudaKernel {\n public:\n Pad(const OpKernelInfo& info) : PadBase(info), CudaKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/padbase.h\"\n\nusing onnxruntime::PadBase;\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Pad final : public PadBase, public RocmKernel {\n public:\n Pad(const OpKernelInfo& info) : PadBase(info), RocmKernel(info) {}\n\n Status ComputeInternal(OpKernelContext* context) const override;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nvoid PadNCHWInputWithPaddingAlongHAndWImpl(\n cudaStream_t stream,\n const int64_t n, // Batch\n const int64_t c, // Channel\n const int64_t input_height,\n const int64_t output_height,\n const int64_t input_width,\n const int64_t output_width,\n const int64_t pad_height_start,\n const int64_t pad_width_start,\n const T pad_value,\n const int pad_mode,\n const T* input_data,\n T* output_data,\n const size_t N);\n\ntemplate \nvoid PadImpl(\n cudaStream_t stream,\n const size_t shape_rank,\n const TArray& input_dims,\n const TArray& input_strides,\n const TArray& lower_pads,\n const T pad_value,\n const int pad_mode,\n const T* input_data,\n const TArray& fdm_output_strides,\n T* output_data,\n const size_t N);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nvoid PadNCHWInputWithPaddingAlongHAndWImpl(\n hipStream_t stream,\n const int64_t n, // Batch\n const int64_t c, // Channel\n const int64_t input_height,\n const int64_t output_height,\n const int64_t input_width,\n const int64_t output_width,\n const int64_t pad_height_start,\n const int64_t pad_width_start,\n const T pad_value,\n const int pad_mode,\n const T* input_data,\n T* output_data,\n const size_t N);\n\ntemplate \nvoid PadImpl(\n hipStream_t stream,\n const size_t shape_rank,\n const TArray& input_dims,\n const TArray& input_strides,\n const TArray& lower_pads,\n const T pad_value,\n const int pad_mode,\n const T* input_data,\n const TArray& fdm_output_strides,\n T* output_data,\n const size_t N);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/platform/ort_mutex.h\"\n#include \"core/providers/cuda/cuda_pch.h\"\n\nnamespace onnxruntime {\n\nusing CaptureId_t = unsigned long long;\n\nstruct CUDAGraph {\n CUDAGraph(){};\n CUDAGraph(cudaStream_t stream);\n ~CUDAGraph();\n\n void SetStream(cudaStream_t stream);\n void CaptureBegin();\n void CaptureEnd();\n Status Replay();\n void Reset();\n\n private:\n cudaGraph_t graph_ = NULL;\n cudaGraphExec_t graph_exec_ = NULL;\n\n bool has_graph_ = false;\n bool has_graph_exec_ = false;\n\n cudaStream_t stream_ = nullptr; // Does not own the stream\n};\n\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/common/common.h\"\n#include \"core/platform/ort_mutex.h\"\n#include \"core/providers/rocm/rocm_pch.h\"\n\nnamespace onnxruntime {\n\nusing CaptureId_t = unsigned long long;\n\nstruct ROCMGraph {\n ROCMGraph(){};\n ROCMGraph(hipStream_t stream);\n ~ROCMGraph();\n\n void SetStream(hipStream_t stream);\n void CaptureBegin();\n void CaptureEnd();\n Status Replay();\n void Reset();\n\n private:\n hipGraph_t graph_ = NULL;\n hipGraphExec_t graph_exec_ = NULL;\n\n bool has_graph_ = false;\n bool has_graph_exec_ = false;\n\n hipStream_t stream_ = nullptr; // Does not own the stream\n};\n\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"quantize_linear.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nStatus CudaQuantizeLinearStd(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element);\n\ntemplate \nStatus CudaQuantizeLinearSat(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,\n bool saturate);\n\ntemplate \nStatus CudaQuantizeLinearAxisStd(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales);\n\ntemplate \nStatus CudaQuantizeLinearAxisSat(cudaStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales, bool saturate);\n\ntemplate \nStatus CudaDequantizeLinearStd(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);\n\ntemplate \nStatus CudaDequantizeLinearSat(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);\n\ntemplate \nStatus CudaDequantizeLinearAxisStd(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales);\n\ntemplate \nStatus CudaDequantizeLinearAxisSat(cudaStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"quantize_linear.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nStatus CudaQuantizeLinearStd(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element);\n\ntemplate \nStatus CudaQuantizeLinearSat(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,\n bool saturate);\n\ntemplate \nStatus CudaQuantizeLinearAxisStd(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales);\n\ntemplate \nStatus CudaQuantizeLinearAxisSat(hipStream_t stream, const U* input, T* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales, bool saturate);\n\ntemplate \nStatus CudaDequantizeLinearStd(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);\n\ntemplate \nStatus CudaDequantizeLinearSat(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element);\n\ntemplate \nStatus CudaDequantizeLinearAxisStd(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales);\n\ntemplate \nStatus CudaDequantizeLinearAxisSat(hipStream_t stream, const T* input, U* output, const U* scale, const T* zero_point, size_t num_of_element,\n size_t batch_size, size_t n_scales);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass QuantizeLinear final : public CudaKernel {\n public:\n QuantizeLinear(const OpKernelInfo& info) : CudaKernel(info) {\n if (!info.GetAttr(\"axis\", &axis_).IsOK()) {\n axis_ = 1;\n }\n if (!info.GetAttr(\"saturate\", &saturate_).IsOK()) {\n saturate_ = 1;\n }\n }\n\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n int64_t axis_;\n int64_t saturate_;\n};\n\ntemplate \nclass DequantizeLinear final : public CudaKernel {\n public:\n DequantizeLinear(const OpKernelInfo& info) : CudaKernel(info) {\n if (!info.GetAttr(\"axis\", &axis_).IsOK()) {\n axis_ = 1;\n }\n }\n\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass QuantizeLinear final : public RocmKernel {\n public:\n QuantizeLinear(const OpKernelInfo& info) : RocmKernel(info) {\n if (!info.GetAttr(\"axis\", &axis_).IsOK()) {\n axis_ = 1;\n }\n if (!info.GetAttr(\"saturate\", &saturate_).IsOK()) {\n saturate_ = 1;\n }\n }\n\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n int64_t axis_;\n int64_t saturate_;\n};\n\ntemplate \nclass DequantizeLinear final : public RocmKernel {\n public:\n DequantizeLinear(const OpKernelInfo& info) : RocmKernel(info) {\n if (!info.GetAttr(\"axis\", &axis_).IsOK()) {\n axis_ = 1;\n }\n }\n\n Status ComputeInternal(OpKernelContext* p_op_kernel_context) const override;\n\n private:\n int64_t axis_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"reshape.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 19,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypesIRv9())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 14, 18,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 13, 13,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 5, 12,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 1,\n 4,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Reshape_1);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"reshape.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 19,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypesIRv9())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 14, 18,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 13, 13,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 5, 12,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes())\n .TypeConstraint(\"shape\", DataTypeImpl::GetTensorType())\n .Alias(0, 0)\n .InputMemoryType(OrtMemTypeCPUInput, 1),\n Reshape);\n\nONNX_OPERATOR_VERSIONED_KERNEL_EX(\n Reshape,\n kOnnxDomain,\n 1,\n 4,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create())\n .Alias(0, 0)\n .TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n Reshape_1);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n#include \"core/providers/cpu/tensor/reshape_helper.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass Reshape final : public CudaKernel {\n public:\n Reshape(const OpKernelInfo& info) : CudaKernel(info),\n allow_zero_(info.GetAttrOrDefault(\"allowzero\", static_cast(0)) == 1) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override {\n // Copy the second input tensor into the shape vector\n const Tensor* shapeTensor = context->Input(1);\n if (shapeTensor == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, \"input count mismatch\");\n if (shapeTensor->Shape().NumDimensions() != 1) return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"A shape tensor must be a vector tensor, got \", shapeTensor->Shape().NumDimensions(), \" dimensions\");\n auto data_span = shapeTensor->template DataAsSpan();\n TensorShapeVector shape(data_span.begin(), data_span.end());\n const Tensor* X = context->Input(0);\n if (X == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, \"input count mismatch\");\n const TensorShape& X_shape = X->Shape();\n\n ReshapeHelper helper(X_shape, shape, allow_zero_);\n\n Tensor* Y = context->Output(0, TensorShape(shape));\n const void* source = X->DataRaw();\n void* target = Y->MutableDataRaw();\n // If source and target pointers are not equal (non-inplace operation), we need to copy the data.\n if (target != source) {\n ORT_ENFORCE(context->GetComputeStream());\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *context->GetComputeStream()));\n }\n\n return Status::OK();\n }\n\n private:\n bool allow_zero_;\n};\n\nclass Reshape_1 final : public 
CudaKernel {\n public:\n Reshape_1(const OpKernelInfo& info) : CudaKernel(info) {\n Status status = info.GetAttrs(\"shape\", shape_);\n ORT_ENFORCE(status.IsOK(), \"Attribute shape is not set.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override {\n TensorShapeVector shape = shape_;\n const Tensor* X = context->Input(0);\n const TensorShape& X_shape = X->Shape();\n\n ReshapeHelper helper(X_shape, shape);\n\n Tensor* Y = context->Output(0, TensorShape(shape));\n const void* source = X->DataRaw();\n void* target = Y->MutableDataRaw();\n // If source and target pointers are not equal (non-inplace operation), we need to copy the data.\n if (target != source) {\n ORT_ENFORCE(context->GetComputeStream());\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *context->GetComputeStream()));\n }\n\n return Status::OK();\n }\n\n private:\n TensorShapeVector shape_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n#include \"core/providers/cpu/tensor/reshape_helper.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass Reshape final : public RocmKernel {\n public:\n Reshape(const OpKernelInfo& info) : RocmKernel(info),\n allow_zero_(info.GetAttrOrDefault(\"allowzero\", static_cast(0)) == 1) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override {\n // Copy the second input tensor into the shape vector\n const Tensor* shapeTensor = context->Input(1);\n if (shapeTensor == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, \"input count mismatch\");\n if (shapeTensor->Shape().NumDimensions() != 1) return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, \"A shape tensor must be a vector tensor, got \", shapeTensor->Shape().NumDimensions(), \" dimensions\");\n auto data_span = shapeTensor->template DataAsSpan();\n TensorShapeVector shape(data_span.begin(), data_span.end());\n const Tensor* X = context->Input(0);\n if (X == nullptr) return Status(common::ONNXRUNTIME, common::FAIL, \"input count mismatch\");\n const TensorShape& X_shape = X->Shape();\n\n ReshapeHelper helper(X_shape, shape, allow_zero_);\n\n Tensor* Y = context->Output(0, TensorShape(shape));\n const void* source = X->DataRaw();\n void* target = Y->MutableDataRaw();\n // If source and target pointers are not equal (non-inplace operation), we need to copy the data.\n if (target != source) {\n ORT_ENFORCE(context->GetComputeStream());\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *context->GetComputeStream()));\n }\n\n return Status::OK();\n }\n\n private:\n bool allow_zero_;\n};\n\nclass Reshape_1 final : public RocmKernel {\n public:\n Reshape_1(const OpKernelInfo& info) : RocmKernel(info) {\n Status status = info.GetAttrs(\"shape\", shape_);\n ORT_ENFORCE(status.IsOK(), \"Attribute shape is not set.\");\n }\n\n Status ComputeInternal(OpKernelContext* context) const override {\n TensorShapeVector shape = shape_;\n const Tensor* X = context->Input(0);\n const TensorShape& X_shape = X->Shape();\n\n ReshapeHelper helper(X_shape, shape);\n\n Tensor* Y = context->Output(0, TensorShape(shape));\n const void* source = X->DataRaw();\n void* target = Y->MutableDataRaw();\n // If source and target pointers are not equal (non-inplace operation), we need to copy the data.\n if (target != source) {\n ORT_ENFORCE(context->GetComputeStream());\n ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, 
*context->GetComputeStream()));\n }\n\n return Status::OK();\n }\n\n private:\n TensorShapeVector shape_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"resize.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n#define REGISTER_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \\\n Resize, \\\n kOnnxDomain, \\\n 10, 10, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .InputMemoryType(OrtMemTypeCPUInput, 1) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n Resize); \\\n ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \\\n Resize, \\\n kOnnxDomain, \\\n 11, 12, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .InputMemoryType(OrtMemTypeCPUInput, 1) \\\n .InputMemoryType(OrtMemTypeCPUInput, 2) \\\n .InputMemoryType(OrtMemTypeCPUInput, 3) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()), \\\n Resize); \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n Resize, \\\n kOnnxDomain, \\\n 13, \\\n T, \\\n kCudaExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .InputMemoryType(OrtMemTypeCPUInput, 1) \\\n .InputMemoryType(OrtMemTypeCPUInput, 2) \\\n .InputMemoryType(OrtMemTypeCPUInput, 3) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()), \\\n Resize);\n\nREGISTER_KERNEL_TYPED(float)\nREGISTER_KERNEL_TYPED(double)\nREGISTER_KERNEL_TYPED(MLFloat16)\nREGISTER_KERNEL_TYPED(int32_t)\nREGISTER_KERNEL_TYPED(uint8_t)\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"resize.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n#define REGISTER_KERNEL_TYPED(T) \\\n ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \\\n Resize, \\\n kOnnxDomain, \\\n 10, 10, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .InputMemoryType(OrtMemTypeCPUInput, 1) \\\n .TypeConstraint(\"T\", DataTypeImpl::GetTensorType()), \\\n Resize); \\\n ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \\\n Resize, \\\n kOnnxDomain, \\\n 11, 12, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .InputMemoryType(OrtMemTypeCPUInput, 1) \\\n .InputMemoryType(OrtMemTypeCPUInput, 2) \\\n .InputMemoryType(OrtMemTypeCPUInput, 3) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()), \\\n Resize); \\\n ONNX_OPERATOR_TYPED_KERNEL_EX( \\\n Resize, \\\n kOnnxDomain, \\\n 13, \\\n T, \\\n kRocmExecutionProvider, \\\n (*KernelDefBuilder::Create()) \\\n .InputMemoryType(OrtMemTypeCPUInput, 1) \\\n .InputMemoryType(OrtMemTypeCPUInput, 2) \\\n .InputMemoryType(OrtMemTypeCPUInput, 3) \\\n .TypeConstraint(\"T1\", DataTypeImpl::GetTensorType()), \\\n Resize);\n\nREGISTER_KERNEL_TYPED(float)\nREGISTER_KERNEL_TYPED(double)\nREGISTER_KERNEL_TYPED(MLFloat16)\nREGISTER_KERNEL_TYPED(int32_t)\nREGISTER_KERNEL_TYPED(uint8_t)\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/tensor/upsample.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \nclass Resize : public Upsample {\n public:\n Resize(const OpKernelInfo& info) : Upsample(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override {\n return Upsample::ComputeInternal(context);\n }\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/tensor/upsample.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nclass Resize : public Upsample {\n public:\n Resize(const OpKernelInfo& info) : Upsample(info) {\n }\n\n Status ComputeInternal(OpKernelContext* context) const override {\n return Upsample::ComputeInternal(context);\n }\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/cpu/tensor/upsamplebase.h\"\n#include \"core/providers/cuda/cuda_common.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nsize_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,\n const gsl::span& output_dims);\n\ntemplate \nvoid ResizeImpl(\n cudaStream_t stream,\n const onnxruntime::UpsampleMode upsample_mode,\n const int rank,\n TArray& input_shape,\n TArray& output_shape,\n TArray& input_strides,\n TArray& output_div_pitches,\n TArray& scales_vals,\n TArray& roi,\n const T* input_data,\n T* output_data,\n const size_t N,\n bool extrapolation_enabled,\n const T extrapolation_value,\n float cubic_coeff_a,\n bool exclude_outside,\n onnxruntime::ResizeCoordinateTransformationMode coordinate_transform_mode,\n onnxruntime::ResizeNearestMode nearest_mode,\n void* dims_mapping);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n#include \"core/common/common.h\"\n#include \"core/providers/cpu/tensor/upsamplebase.h\"\n#include \"core/providers/rocm/rocm_common.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nsize_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,\n const gsl::span& output_dims);\n\ntemplate \nvoid ResizeImpl(\n hipStream_t stream,\n const onnxruntime::UpsampleMode upsample_mode,\n const int rank,\n TArray& input_shape,\n TArray& output_shape,\n TArray& input_strides,\n TArray& output_div_pitches,\n TArray& scales_vals,\n TArray& roi,\n const T* input_data,\n T* output_data,\n const size_t N,\n bool extrapolation_enabled,\n const T extrapolation_value,\n float cubic_coeff_a,\n bool exclude_outside,\n onnxruntime::ResizeCoordinateTransformationMode coordinate_transform_mode,\n onnxruntime::ResizeNearestMode nearest_mode,\n void* dims_mapping);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. 
All rights reserved.\n// Licensed under the MIT License.\n\n#include \"reverse_sequence.h\"\n#include \"reverse_sequence_impl.h\"\n\n#include \"core/providers/cpu/tensor/utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nONNX_OPERATOR_KERNEL_EX(\n ReverseSequence,\n kOnnxDomain,\n 10,\n kCudaExecutionProvider,\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n ReverseSequenceOp);\n\n#define ReverseSequenceCallCudaImplTypeAs(T, TEqual) \\\n if (X.IsDataType()) { \\\n CUDA_RETURN_IF_ERROR(ReverseSequenceCudaImpl( \\\n Stream(context), \\\n reinterpret_cast::MappedType*>(X.Data()), \\\n seq_lengths.Data(), \\\n reinterpret_cast::MappedType*>(Y.MutableData()), \\\n gsl::narrow(batch_size), gsl::narrow(max_seq_len), gsl::narrow(element_size), \\\n time_major_)); \\\n return Status::OK(); \\\n }\n\nStatus ReverseSequenceOp::ComputeInternal(OpKernelContext* context) const {\n const auto& X = *context->Input(0);\n const auto& dims = X.Shape();\n\n const auto batch_size = time_major_ ? dims[1] : dims[0];\n const auto max_seq_len = time_major_ ? dims[0] : dims[1];\n const auto element_size = dims.SizeFromDimension(2);\n\n const auto& seq_lengths = *context->Input(1);\n const auto& seq_len_shape = seq_lengths.Shape();\n\n if (seq_len_shape.NumDimensions() != 1 || seq_len_shape[0] != batch_size) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"sequence_lens shape must be {batch_size}. Got:\",\n seq_len_shape, \". batch_size=\", batch_size);\n }\n auto& Y = *context->Output(0, dims);\n\n ReverseSequenceCallCudaImplTypeAs(float, int32_t);\n ReverseSequenceCallCudaImplTypeAs(int32_t, int32_t);\n ReverseSequenceCallCudaImplTypeAs(uint32_t, int32_t);\n\n ReverseSequenceCallCudaImplTypeAs(MLFloat16, int16_t);\n ReverseSequenceCallCudaImplTypeAs(int16_t, int16_t);\n ReverseSequenceCallCudaImplTypeAs(uint16_t, int16_t);\n\n ReverseSequenceCallCudaImplTypeAs(int8_t, int8_t);\n ReverseSequenceCallCudaImplTypeAs(uint8_t, int8_t);\n ReverseSequenceCallCudaImplTypeAs(bool, int8_t);\n\n ReverseSequenceCallCudaImplTypeAs(int64_t, int64_t);\n ReverseSequenceCallCudaImplTypeAs(double, int64_t);\n ReverseSequenceCallCudaImplTypeAs(uint64_t, int64_t);\n\n return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED,\n \"Type for \", X.DataType(), \" is not supported yet in ReverseSequence.\");\n}\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#include \"reverse_sequence.h\"\n#include \"reverse_sequence_impl.h\"\n\n#include \"core/providers/cpu/tensor/utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nONNX_OPERATOR_KERNEL_EX(\n ReverseSequence,\n kOnnxDomain,\n 10,\n kRocmExecutionProvider,\n (*KernelDefBuilder::Create()).TypeConstraint(\"T\", DataTypeImpl::AllFixedSizeTensorTypes()),\n ReverseSequenceOp);\n\n#define ReverseSequenceCallCudaImplTypeAs(T, TEqual) \\\n if (X.IsDataType()) { \\\n HIP_RETURN_IF_ERROR(ReverseSequenceCudaImpl( \\\n Stream(context), \\\n reinterpret_cast::MappedType*>(X.Data()), \\\n seq_lengths.Data(), \\\n reinterpret_cast::MappedType*>(Y.MutableData()), \\\n gsl::narrow(batch_size), gsl::narrow(max_seq_len), gsl::narrow(element_size), \\\n time_major_)); \\\n return Status::OK(); \\\n }\n\nStatus ReverseSequenceOp::ComputeInternal(OpKernelContext* context) const {\n const auto& X = *context->Input(0);\n const auto& dims = X.Shape();\n\n const auto batch_size = time_major_ ? 
dims[1] : dims[0];\n const auto max_seq_len = time_major_ ? dims[0] : dims[1];\n const auto element_size = dims.SizeFromDimension(2);\n\n const auto& seq_lengths = *context->Input(1);\n const auto& seq_len_shape = seq_lengths.Shape();\n\n if (seq_len_shape.NumDimensions() != 1 || seq_len_shape[0] != batch_size) {\n return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, \"sequence_lens shape must be {batch_size}. Got:\",\n seq_len_shape, \". batch_size=\", batch_size);\n }\n auto& Y = *context->Output(0, dims);\n\n ReverseSequenceCallCudaImplTypeAs(float, int32_t);\n ReverseSequenceCallCudaImplTypeAs(int32_t, int32_t);\n ReverseSequenceCallCudaImplTypeAs(uint32_t, int32_t);\n\n ReverseSequenceCallCudaImplTypeAs(MLFloat16, int16_t);\n ReverseSequenceCallCudaImplTypeAs(int16_t, int16_t);\n ReverseSequenceCallCudaImplTypeAs(uint16_t, int16_t);\n\n ReverseSequenceCallCudaImplTypeAs(int8_t, int8_t);\n ReverseSequenceCallCudaImplTypeAs(uint8_t, int8_t);\n ReverseSequenceCallCudaImplTypeAs(bool, int8_t);\n\n ReverseSequenceCallCudaImplTypeAs(int64_t, int64_t);\n ReverseSequenceCallCudaImplTypeAs(double, int64_t);\n ReverseSequenceCallCudaImplTypeAs(uint64_t, int64_t);\n\n return ORT_MAKE_STATUS(ONNXRUNTIME, NOT_IMPLEMENTED,\n \"Type for \", X.DataType(), \" is not supported yet in ReverseSequence.\");\n}\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/cuda/cuda_kernel.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\nclass ReverseSequenceOp final : public CudaKernel {\n public:\n ReverseSequenceOp(const OpKernelInfo& info) : CudaKernel(info) {\n int64_t batch_axis;\n int64_t time_axis;\n ORT_ENFORCE(info.GetAttr(\"batch_axis\", &batch_axis).IsOK());\n ORT_ENFORCE(info.GetAttr(\"time_axis\", &time_axis).IsOK());\n\n ORT_ENFORCE(batch_axis < 2, \"Invalid batch_axis of \", batch_axis, \". Must be 0 or 1\");\n ORT_ENFORCE(time_axis < 2, \"Invalid time_axis of \", time_axis, \". Must be 0 or 1\");\n\n ORT_ENFORCE(batch_axis != time_axis,\n \"time_axis and batch_axis must have different values but both are \", time_axis);\n\n time_major_ = time_axis == 0;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool time_major_;\n};\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \"core/providers/shared_library/provider_api.h\"\n#include \"core/providers/rocm/rocm_kernel.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\nclass ReverseSequenceOp final : public RocmKernel {\n public:\n ReverseSequenceOp(const OpKernelInfo& info) : RocmKernel(info) {\n int64_t batch_axis;\n int64_t time_axis;\n ORT_ENFORCE(info.GetAttr(\"batch_axis\", &batch_axis).IsOK());\n ORT_ENFORCE(info.GetAttr(\"time_axis\", &time_axis).IsOK());\n\n ORT_ENFORCE(batch_axis < 2, \"Invalid batch_axis of \", batch_axis, \". Must be 0 or 1\");\n ORT_ENFORCE(time_axis < 2, \"Invalid time_axis of \", time_axis, \". 
Must be 0 or 1\");\n\n ORT_ENFORCE(batch_axis != time_axis,\n \"time_axis and batch_axis must have different values but both are \", time_axis);\n\n time_major_ = time_axis == 0;\n }\n\n Status ComputeInternal(OpKernelContext* context) const override;\n\n private:\n bool time_major_;\n};\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/cuda/shared_inc/cuda_utils.h\"\n\nnamespace onnxruntime {\nnamespace cuda {\n\ntemplate \ncudaError_t ReverseSequenceCudaImpl(\n cudaStream_t stream,\n const T* x_data,\n const int64_t* seq_len_data,\n T* y_data,\n const int batch_size,\n const int max_seq_len,\n const int element_size,\n const bool time_major);\n\n} // namespace cuda\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n#include \"core/providers/rocm/shared_inc/rocm_utils.h\"\n\nnamespace onnxruntime {\nnamespace rocm {\n\ntemplate \nhipError_t ReverseSequenceCudaImpl(\n hipStream_t stream,\n const T* x_data,\n const int64_t* seq_len_data,\n T* y_data,\n const int batch_size,\n const int max_seq_len,\n const int element_size,\n const bool time_major);\n\n} // namespace rocm\n} // namespace onnxruntime\n###" }, { "cuda": "\n// Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \"core/providers/providers.h\"\n\nstruct OrtCUDAProviderOptions;\nstruct OrtCUDAProviderOptionsV2;\n\nnamespace onnxruntime {\n// defined in provider_bridge_ort.cc\nstruct CudaProviderFactoryCreator {\n static std::shared_ptr Create(const OrtCUDAProviderOptions* provider_options);\n static std::shared_ptr Create(const OrtCUDAProviderOptionsV2* provider_options);\n};\n} // namespace onnxruntime\n\n\n###", "hip": " // Copyright (c) Microsoft Corporation. All rights reserved.\n// Licensed under the MIT License.\n\n#pragma once\n\n#include \n\n#include \"core/providers/providers.h\"\n\nstruct OrtROCMProviderOptions;\nstruct OrtROCMProviderOptionsV2;\n\nnamespace onnxruntime {\n// defined in provider_bridge_ort.cc\nstruct CudaProviderFactoryCreator {\n static std::shared_ptr Create(const OrtROCMProviderOptions* provider_options);\n static std::shared_ptr Create(const OrtROCMProviderOptionsV2* provider_options);\n};\n} // namespace onnxruntime\n###" } ]