hip (stringlengths 140-3.32k) | cuda (stringlengths 84-3.33k)
---|---|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
namespace at { namespace hip {
/**
Computes ceil(a / b)
*/
template <typename T>
__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
return (a + b - 1) / b;
}
namespace {
// Threads per block for our apply kernel
// FIXME: use occupancy calculator instead
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <int step = 1>
inline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
if (curDevice == -1) return false;
uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
if (numBlocks > maxGridX)
numBlocks = maxGridX;
grid = dim3(numBlocks);
return true;
}
constexpr int getApplyBlocksPerSM() {
return AT_APPLY_BLOCKS_PER_SM;
}
constexpr int getApplyBlockSize() {
return AT_APPLY_THREADS_PER_BLOCK;
}
inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
return dim3(max_threads_per_block);
}
}
}} // namespace at::hip
### |
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
namespace at { namespace cuda {
/**
Computes ceil(a / b)
*/
template <typename T>
__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
return (a + b - 1) / b;
}
namespace {
// Threads per block for our apply kernel
// FIXME: use occupancy calculator instead
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <int step = 1>
inline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
if (curDevice == -1) return false;
uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
if (numBlocks > maxGridX)
numBlocks = maxGridX;
grid = dim3(numBlocks);
return true;
}
constexpr int getApplyBlocksPerSM() {
return AT_APPLY_BLOCKS_PER_SM;
}
constexpr int getApplyBlockSize() {
return AT_APPLY_THREADS_PER_BLOCK;
}
inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
return dim3(max_threads_per_block);
}
}
}} // namespace at::cuda
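Illustrative usage (not part of the snippet above): a minimal sketch of how getApplyBlock()/getApplyGrid() could drive a launch for a hypothetical elementwise kernel, assuming the code sits in the same translation unit so the anonymous-namespace helpers are visible; fill_ones_kernel and launch_fill_ones are made-up names.
#include <cstdint>
#include <c10/cuda/CUDAException.h>  // C10_CUDA_KERNEL_LAUNCH_CHECK
__global__ void fill_ones_kernel(float* data, uint64_t n) {
  // Grid-stride loop, so the kernel stays correct even when getApplyGrid()
  // clamps the block count to the device's maxGridSize[0].
  for (uint64_t i = blockIdx.x * static_cast<uint64_t>(blockDim.x) + threadIdx.x;
       i < n;
       i += static_cast<uint64_t>(blockDim.x) * gridDim.x) {
    data[i] = 1.0f;
  }
}
void launch_fill_ones(float* data, uint64_t n) {
  const dim3 block = getApplyBlock();               // 512 threads per block by default
  dim3 grid;
  const int64_t curDevice = at::cuda::current_device();
  if (!getApplyGrid(n, grid, curDevice)) {
    return;                                         // no valid device
  }
  fill_ones_kernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(data, n);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}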
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/Repeat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/repeat_interleave_native.h>
#endif
template <typename index_t>
__global__ static void compute_hip_kernel(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
CUDA_KERNEL_ASSERT(result_size == cumsum_ptr[size - 1]);
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = (blockDim.x * gridDim.x) / C10_WARP_SIZE;
int warp_id = idx / C10_WARP_SIZE;
int tid_in_warp = idx % C10_WARP_SIZE;
for (int64_t i = warp_id; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
index_t repeat = repeat_ptr[i];
CUDA_KERNEL_ASSERT(repeat >= 0);
int64_t start = end - repeat;
for (int64_t j = start + tid_in_warp; j < end; j += C10_WARP_SIZE) {
result_ptr[j] = i;
}
}
}
template <typename index_t>
static void compute_hip(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
int64_t block = 512;
int64_t warps_per_block = block / at::cuda::warp_size();
int64_t grid =
std::min<int64_t>((size + warps_per_block - 1) / warps_per_block, 2048L);
hipLaunchKernelGGL(( compute_hip_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStream(),
repeat_ptr, cumsum_ptr, result_ptr, size, result_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
namespace at::native {
Tensor repeat_interleave_hip(
const Tensor& repeat,
c10::optional<int64_t> output_size) {
Tensor output;
AT_DISPATCH_INDEX_TYPES(
repeat.scalar_type(), "repeat_interleave_hip", [&]() {
output = repeat_interleave_common<index_t, compute_hip<index_t>>(
repeat, output_size);
});
return output;
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/Repeat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/repeat_interleave_native.h>
#endif
template <typename index_t>
__global__ static void compute_cuda_kernel(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
CUDA_KERNEL_ASSERT(result_size == cumsum_ptr[size - 1]);
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = (blockDim.x * gridDim.x) / C10_WARP_SIZE;
int warp_id = idx / C10_WARP_SIZE;
int tid_in_warp = idx % C10_WARP_SIZE;
for (int64_t i = warp_id; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
index_t repeat = repeat_ptr[i];
CUDA_KERNEL_ASSERT(repeat >= 0);
int64_t start = end - repeat;
for (int64_t j = start + tid_in_warp; j < end; j += C10_WARP_SIZE) {
result_ptr[j] = i;
}
}
}
template <typename index_t>
static void compute_cuda(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
int64_t block = 512;
int64_t warps_per_block = block / at::cuda::warp_size();
int64_t grid =
std::min<int64_t>((size + warps_per_block - 1) / warps_per_block, 2048L);
compute_cuda_kernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
repeat_ptr, cumsum_ptr, result_ptr, size, result_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
namespace at::native {
Tensor repeat_interleave_cuda(
const Tensor& repeat,
c10::optional<int64_t> output_size) {
Tensor output;
AT_DISPATCH_INDEX_TYPES(
repeat.scalar_type(), "repeat_interleave_cuda", [&]() {
output = repeat_interleave_common<index_t, compute_cuda<index_t>>(
repeat, output_size);
});
return output;
}
} // namespace at::native
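For reference, the kernel above assigns one warp per input index i and fills result[cumsum[i] - repeat[i], cumsum[i]) with i. Below is a standalone, host-only sketch of the same semantics (plain C++, independent of ATen) that can serve as a quick sanity check.
#include <cstdint>
#include <iostream>
#include <vector>
// Serial reference: result[j] = i for every j in [cumsum[i] - repeats[i], cumsum[i]).
// Example: repeats {2, 0, 3} give cumsum {2, 2, 5} and result {0, 0, 2, 2, 2}.
std::vector<int64_t> repeat_interleave_reference(const std::vector<int64_t>& repeats) {
  std::vector<int64_t> cumsum(repeats.size());
  int64_t running = 0;
  for (size_t i = 0; i < repeats.size(); ++i) {
    running += repeats[i];
    cumsum[i] = running;
  }
  std::vector<int64_t> result(running);
  for (size_t i = 0; i < repeats.size(); ++i) {
    int64_t start = cumsum[i] - repeats[i];
    for (int64_t j = start; j < cumsum[i]; ++j) {
      result[j] = static_cast<int64_t>(i);
    }
  }
  return result;
}
int main() {
  for (int64_t v : repeat_interleave_reference({2, 0, 3})) {
    std::cout << v << ' ';  // prints: 0 0 2 2 2
  }
  std::cout << '\n';
}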
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
void scaled_modified_bessel_k0_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_hip", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
void scaled_modified_bessel_k1_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_hip", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
void scaled_modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_t_name[] = "shifted_chebyshev_polynomial_t_forward";
void shifted_chebyshev_polynomial_t_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_t_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_t_stub, &shifted_chebyshev_polynomial_t_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_t_name[] = "shifted_chebyshev_polynomial_t_forward";
void shifted_chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_t_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_t_stub, &shifted_chebyshev_polynomial_t_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
void shifted_chebyshev_polynomial_u_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
void shifted_chebyshev_polynomial_v_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_v_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
void shifted_chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_v_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_w_name[] = "shifted_chebyshev_polynomial_w_forward";
void shifted_chebyshev_polynomial_w_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_w_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_w_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_w_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_w_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_w_stub, &shifted_chebyshev_polynomial_w_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_w_name[] = "shifted_chebyshev_polynomial_w_forward";
void shifted_chebyshev_polynomial_w_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_w_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_w_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_w_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_w_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_w_stub, &shifted_chebyshev_polynomial_w_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace at::native {
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {
int64_t ndim = self.dim();
// sort the strides in descending order according to its value,
// keeping dim the last.
std::vector<int64_t> strides = self.strides().vec();
strides[dim] = -1;
std::vector<int64_t> original_dim(ndim);
for (int64_t i = 0; i < ndim; i++) {
original_dim[i] = i;
}
thrust::stable_sort_by_key(
thrust::host, strides.data(), strides.data() + ndim, original_dim.data(),
thrust::greater<int64_t>()
);
// generate contiguous strides on permuted dims
std::vector<int64_t> new_strides(ndim);
std::vector<int64_t> new_strides_unsort(ndim);
int64_t cumprod = 1;
for (int64_t i = 0; i < ndim; i++) {
new_strides[ndim - 1 - i] = cumprod;
cumprod *= self.sizes()[original_dim[ndim - 1 - i]];
}
// unsort new strides
for (int64_t i = 0; i < ndim; i++) {
new_strides_unsort[original_dim[i]] = new_strides[i];
}
return new_strides_unsort;
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace at::native {
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {
int64_t ndim = self.dim();
// sort the strides in descending order according to its value,
// keeping dim the last.
std::vector<int64_t> strides = self.strides().vec();
strides[dim] = -1;
std::vector<int64_t> original_dim(ndim);
for (int64_t i = 0; i < ndim; i++) {
original_dim[i] = i;
}
thrust::stable_sort_by_key(
thrust::host, strides.data(), strides.data() + ndim, original_dim.data(),
thrust::greater<int64_t>()
);
// generate contiguous strides on permuted dims
std::vector<int64_t> new_strides(ndim);
std::vector<int64_t> new_strides_unsort(ndim);
int64_t cumprod = 1;
for (int64_t i = 0; i < ndim; i++) {
new_strides[ndim - 1 - i] = cumprod;
cumprod *= self.sizes()[original_dim[ndim - 1 - i]];
}
// unsort new strides
for (int64_t i = 0; i < ndim; i++) {
new_strides_unsort[original_dim[i]] = new_strides[i];
}
return new_strides_unsort;
}
} // namespace at::native
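As a standalone illustration of the stride logic above (plain C++, no ATen; the function name below is made up): for sizes {2, 3, 4} with contiguous strides {12, 4, 1} and dim = 1, the dense strides with dim placed last come out as {12, 1, 3}.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>
std::vector<int64_t> dense_strides_dim_last(std::vector<int64_t> sizes,
                                            std::vector<int64_t> strides,
                                            int64_t dim) {
  const int64_t ndim = static_cast<int64_t>(sizes.size());
  strides[dim] = -1;  // force `dim` to sort last
  std::vector<int64_t> original_dim(ndim);
  std::iota(original_dim.begin(), original_dim.end(), 0);
  // Stable sort of dims by descending stride (mirrors thrust::stable_sort_by_key).
  std::stable_sort(original_dim.begin(), original_dim.end(),
                   [&](int64_t a, int64_t b) { return strides[a] > strides[b]; });
  std::vector<int64_t> new_strides(ndim), new_strides_unsort(ndim);
  int64_t cumprod = 1;
  for (int64_t i = 0; i < ndim; ++i) {
    new_strides[ndim - 1 - i] = cumprod;
    cumprod *= sizes[original_dim[ndim - 1 - i]];
  }
  for (int64_t i = 0; i < ndim; ++i) {
    new_strides_unsort[original_dim[i]] = new_strides[i];
  }
  return new_strides_unsort;
}
int main() {
  for (int64_t s : dense_strides_dim_last({2, 3, 4}, {12, 4, 1}, 1)) {
    std::cout << s << ' ';  // prints: 12 1 3
  }
  std::cout << '\n';
}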
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/sspaddmm_native.h>
#endif
namespace at::native {
// sparse, sparse, sparse, dense, real, real -> sparse
Tensor& _sspaddmm_out_only_sparse_hip(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("tensor.sspaddmm(...) can only be called on sparse tensors");
}
Tensor& _sspaddmm_out_hip(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("NYI: HIP sspaddmm is not implemented");
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/sspaddmm_native.h>
#endif
namespace at::native {
// sparse, sparse, sparse, dense, real, real -> sparse
Tensor& _sspaddmm_out_only_sparse_cuda(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("tensor.sspaddmm(...) can only be called on sparse tensors");
}
Tensor& _sspaddmm_out_cuda(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("NYI: CUDA sspaddmm is not implemented");
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward";
void spherical_bessel_j0_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_hip", [&]() {
jitted_gpu_kernel<spherical_bessel_j0_name, scalar_t, scalar_t, 1>(iterator, spherical_bessel_j0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return spherical_bessel_j0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward";
void spherical_bessel_j0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() {
jitted_gpu_kernel<spherical_bessel_j0_name, scalar_t, scalar_t, 1>(iterator, spherical_bessel_j0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return spherical_bessel_j0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/TensorBase.h>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/native/CanUse32BitIndexMath.h>
namespace at {
namespace hip {
namespace detail {
TORCH_HIP_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
using at::native::canUse32BitIndexMath;
template <typename scalar, typename IndexType>
TensorInfo<scalar, IndexType>
getTensorInfo(const at::TensorBase &t) {
IndexType sz[MAX_TENSORINFO_DIMS];
IndexType st[MAX_TENSORINFO_DIMS];
int dims = t.dim();
for (int i = 0; i < dims; ++i) {
sz[i] = t.size(i);
st[i] = t.stride(i);
}
return TensorInfo<scalar, IndexType>(
t.data_ptr<scalar>(), dims, sz, st);
}
} // detail
} // hip
} // at
### |
#pragma once
#include <ATen/core/TensorBase.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/native/CanUse32BitIndexMath.h>
namespace at {
namespace cuda {
namespace detail {
TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
using at::native::canUse32BitIndexMath;
template <typename scalar, typename IndexType>
TensorInfo<scalar, IndexType>
getTensorInfo(const at::TensorBase &t) {
IndexType sz[MAX_TENSORINFO_DIMS];
IndexType st[MAX_TENSORINFO_DIMS];
int dims = t.dim();
for (int i = 0; i < dims; ++i) {
sz[i] = t.size(i);
st[i] = t.stride(i);
}
return TensorInfo<scalar, IndexType>(
t.data_ptr<scalar>(), dims, sz, st);
}
} // detail
} // cuda
} // at
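A hedged consumer sketch (assumed usage, not taken from the header above): indexing code typically branches on canUse32BitIndexMath and then materializes a TensorInfo with the matching index type. The float element type here is only an assumption about the tensor's dtype.
#include <cstdint>
#include <ATen/core/TensorBase.h>
void build_tensor_info_for(const at::TensorBase& t) {
  // Assumes t holds float data; data_ptr<float>() would throw otherwise.
  if (at::cuda::detail::canUse32BitIndexMath(t)) {
    auto info = at::cuda::detail::getTensorInfo<float, unsigned int>(t);
    (void)info;  // info.data, info.dims, info.sizes[i], info.strides[i]
  } else {
    auto info = at::cuda::detail::getTensorInfo<float, uint64_t>(t);
    (void)info;
  }
}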
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/util/BFloat16-math.h>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void nextafter_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.common_dtype(), "nextafter_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return std::nextafter(a, b);
});
});
}
void heaviside_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), "heaviside_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a == 0 ? b : static_cast<scalar_t>(a > 0);
});
});
}
REGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_hip);
REGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/util/BFloat16-math.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void nextafter_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.common_dtype(), "nextafter_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return std::nextafter(a, b);
});
});
}
void heaviside_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), "heaviside_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a == 0 ? b : static_cast<scalar_t>(a > 0);
});
});
}
REGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_cuda);
REGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_cuda);
} // namespace at::native
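A tiny host-side reference for the heaviside lambda above (standalone sketch, not ATen code): the result is 0 for negative a, 1 for positive a, and the second argument when a is exactly zero.
#include <iostream>
double heaviside_ref(double a, double b) {
  return a == 0 ? b : static_cast<double>(a > 0);
}
int main() {
  std::cout << heaviside_ref(-2.0, 0.5) << ' '    // 0
            << heaviside_ref(0.0, 0.5) << ' '     // 0.5
            << heaviside_ref(3.0, 0.5) << '\n';   // 1
}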
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char acosh_name[] = "acosh_impl";
#endif
void acosh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if(at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acosh_string = jiterator_stringify(
template <typename T>
T acosh_impl(T a) {
return std::acosh(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/ acosh_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, acosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "acosh_hip",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
}
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char acosh_name[] = "acosh_impl";
#endif
void acosh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if(at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acosh_string = jiterator_stringify(
template <typename T>
T acosh_impl(T a) {
return std::acosh(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/ acosh_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, acosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "acosh_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
}
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char acos_name[] = "acos_impl";
#endif
void acos_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acos_string = jiterator_stringify(
template <typename T> T acos_impl(T a) { return std::acos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
jitted_gpu_kernel<
/*name=*/acos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, acos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"acos_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char acos_name[] = "acos_impl";
#endif
void acos_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acos_string = jiterator_stringify(
template <typename T> T acos_impl(T a) { return std::acos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
jitted_gpu_kernel<
/*name=*/acos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, acos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"acos_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char asinh_name[] = "asinh_impl";
#endif
void asinh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asinh_string = jiterator_stringify(
template <typename T> T asinh_impl(T a) { return std::asinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/asinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"asinh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
}
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char asinh_name[] = "asinh_impl";
#endif
void asinh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asinh_string = jiterator_stringify(
template <typename T> T asinh_impl(T a) { return std::asinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/asinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"asinh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
}
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char asin_name[] = "asin_impl";
#endif
void asin_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asin_string = jiterator_stringify(
template <typename T> T asin_impl(T a) { return std::asin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
jitted_gpu_kernel<
/*name=*/asin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "asin_hip", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
}
REGISTER_DISPATCH(asin_stub, &asin_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char asin_name[] = "asin_impl";
#endif
void asin_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asin_string = jiterator_stringify(
template <typename T> T asin_impl(T a) { return std::asin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
jitted_gpu_kernel<
/*name=*/asin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "asin_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
}
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char atanh_name[] = "atanh_impl";
#endif
void atanh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atanh_string = jiterator_stringify(
template <typename T> T atanh_impl(T a) { return std::atanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/atanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, atanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"atanh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
}
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atanh_name[] = "atanh_impl";
#endif
void atanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atanh_string = jiterator_stringify(
template <typename T> T atanh_impl(T a) { return std::atanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/atanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, atanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"atanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
}
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char atan_name[] = "atan_impl";
#endif
void atan_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atan_string = jiterator_stringify(
template <typename T>
T atan_impl(T a) {
return std::atan(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
jitted_gpu_kernel<
/*name=*/ atan_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, atan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "atan_hip",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
}
REGISTER_DISPATCH(atan_stub, &atan_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atan_name[] = "atan_impl";
#endif
void atan_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atan_string = jiterator_stringify(
template <typename T>
T atan_impl(T a) {
return std::atan(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
jitted_gpu_kernel<
/*name=*/ atan_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, atan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "atan_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
}
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char cosh_name[] = "cosh_impl";
#endif
void cosh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cosh_string = jiterator_stringify(
template <typename T> T cosh_impl(T a) { return std::cosh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/cosh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cosh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
}
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char cosh_name[] = "cosh_impl";
#endif
void cosh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cosh_string = jiterator_stringify(
template <typename T> T cosh_impl(T a) { return std::cosh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/cosh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cosh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
}
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char cos_name[] = "cos_impl";
#endif // AT_USE_JITERATOR()
void cos_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cos_string = jiterator_stringify(
template <typename T> T cos_impl(T a) { return std::cos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
jitted_gpu_kernel<
/*name=*/cos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cos_hip",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cos(a); });
});
}
}
REGISTER_DISPATCH(cos_stub, &cos_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char cos_name[] = "cos_impl";
#endif // AT_USE_JITERATOR()
void cos_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cos_string = jiterator_stringify(
template <typename T> T cos_impl(T a) { return std::cos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
jitted_gpu_kernel<
/*name=*/cos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cos_cuda",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cos(a); });
});
}
}
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char sinh_name[] = "sinh_impl";
#endif
void sinh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sinh_string = jiterator_stringify(
template <typename T> T sinh_impl(T a) { return std::sinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/sinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sinh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
}
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char sinh_name[] = "sinh_impl";
#endif
void sinh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sinh_string = jiterator_stringify(
template <typename T> T sinh_impl(T a) { return std::sinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/sinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sinh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
}
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
} // namespace at::native
### |
#pragma once
#include <assert.h>
#if defined(__HIP_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#include <hip/hip_runtime.h>
#endif
namespace at {
namespace hip {
namespace detail {
template <typename Value>
struct DivMod {
Value div, mod;
C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }
};
template <typename Value>
struct IntDivider {
IntDivider() = default;
IntDivider(Value d) : divisor(d) { }
C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }
C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }
C10_HOST_DEVICE inline DivMod<Value> divmod(Value n) const {
return DivMod<Value>(n / divisor, n % divisor);
}
Value divisor;
};
template <>
struct IntDivider<unsigned int> {
static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
IntDivider() = default;
IntDivider(unsigned int d) : divisor(d) {
assert(divisor >= 1 && divisor <= INT32_MAX);
for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
uint64_t one = 1;
uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
m1 = magic;
assert(m1 > 0 && m1 == magic);
}
C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {
#if defined(__HIP_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
#else
uint64_t t = ((uint64_t) n * m1) >> 32;
return (t + n) >> shift;
#endif
}
C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
C10_HOST_DEVICE inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor;
unsigned int m1;
unsigned int shift;
};
}}}
### |
#pragma once
#include <assert.h>
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#include <cuda_runtime.h>
#endif
namespace at {
namespace cuda {
namespace detail {
template <typename Value>
struct DivMod {
Value div, mod;
C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }
};
template <typename Value>
struct IntDivider {
IntDivider() = default;
IntDivider(Value d) : divisor(d) { }
C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }
C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }
C10_HOST_DEVICE inline DivMod<Value> divmod(Value n) const {
return DivMod<Value>(n / divisor, n % divisor);
}
Value divisor;
};
template <>
struct IntDivider<unsigned int> {
static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
IntDivider() = default;
IntDivider(unsigned int d) : divisor(d) {
assert(divisor >= 1 && divisor <= INT32_MAX);
for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
uint64_t one = 1;
uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
m1 = magic;
assert(m1 > 0 && m1 == magic);
}
C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
#else
uint64_t t = ((uint64_t) n * m1) >> 32;
return (t + n) >> shift;
#endif
}
C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
C10_HOST_DEVICE inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor;
unsigned int m1;
unsigned int shift;
};
}}}
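// NOTE (illustrative sketch, not part of the original ATen header): the
// IntDivider<unsigned int> specialization above replaces hardware division by a
// runtime-constant divisor with a magic-number multiply (in the style of
// Granlund & Montgomery's invariant-integer division): the constructor picks the
// smallest `shift` with (1u << shift) >= divisor and a 32-bit multiplier m1, so
// that div(n) == (__umulhi(n, m1) + n) >> shift over the supported input range.
// A minimal host-side usage sketch:
//
//   at::cuda::detail::IntDivider<unsigned int> d(7);
//   auto qr = d.divmod(100u);   // qr.div == 14, qr.mod == 2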
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char sin_name[] = "sin_impl";
#endif
void sin_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sin_string = jiterator_stringify(
template <typename T> T sin_impl(T a) { return std::sin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
jitted_gpu_kernel<
/*name=*/sin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sin_hip",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sin(a); });
});
}
}
REGISTER_DISPATCH(sin_stub, &sin_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char sin_name[] = "sin_impl";
#endif
void sin_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sin_string = jiterator_stringify(
template <typename T> T sin_impl(T a) { return std::sin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
jitted_gpu_kernel<
/*name=*/sin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sin_cuda",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sin(a); });
});
}
}
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char tanh_name[] = "tanh_impl";
#endif
void tanh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh_impl(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh_impl";
#endif
void tanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh_impl(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char tan_name[] = "tan_impl";
#endif
void tan_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tan_string = jiterator_stringify(
template <typename T> T tan_impl(T a) { return std::tan(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
jitted_gpu_kernel<
/*name=*/tan_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tan_hip",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
});
}
}
REGISTER_DISPATCH(tan_stub, &tan_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan_impl";
#endif
void tan_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tan_string = jiterator_stringify(
template <typename T> T tan_impl(T a) { return std::tan(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
jitted_gpu_kernel<
/*name=*/tan_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tan_cuda",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
});
}
}
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace internal {
template <typename scalar_t>
std::tuple<Tensor, Tensor, Tensor> unique_hip_template(
const Tensor& self,
const bool consecutive,
const bool return_inverse,
const bool return_counts);
} // namespace internal
} // namespace native
} // namespace at
### |
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace internal {
template <typename scalar_t>
std::tuple<Tensor, Tensor, Tensor> unique_cuda_template(
const Tensor& self,
const bool consecutive,
const bool return_inverse,
const bool return_counts);
} // namespace internal
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/ValidateCompressedIndicesCommon.h>
#include <ATen/native/hip\Loops.cuh>
namespace at::native {
namespace {
template <typename func_t>
struct HIPKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
}
void _validate_compressed_sparse_indices_hip(
const bool is_crow,
const Tensor& cidx,
const Tensor& idx,
const int64_t cdim,
const int64_t dim,
const int64_t nnz) {
validate_compressed_sparse_indices_kernel<HIPKernelLauncher>(
is_crow, cidx, idx, cdim, dim, nnz);
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/ValidateCompressedIndicesCommon.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
template <typename func_t>
struct CUDAKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
}
void _validate_compressed_sparse_indices_cuda(
const bool is_crow,
const Tensor& cidx,
const Tensor& idx,
const int64_t cdim,
const int64_t dim,
const int64_t nnz) {
validate_compressed_sparse_indices_kernel<CUDAKernelLauncher>(
is_crow, cidx, idx, cdim, dim, nnz);
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/native/hip\jit_utils.h>
namespace at::native {
namespace {
/*
* This function is derived from the implementation of the zeta function in the Cephes Math Library.
* See note [3-Clause BSD License for the Cephes Math Library].
*/
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_HIP char zeta_name[] = "zeta";
void zeta_kernel_hip(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars</*name=*/zeta_name,
/*return_dtype=*/ scalar_t,
/*f_inputs_dtype=*/ scalar_t>(iter, zeta_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {
return zeta<scalar_t, /*is_hip=*/true>(x, q);
});
});
#endif //jiterator
}
} // namespace (anonymous)
REGISTER_DISPATCH(zeta_stub, &zeta_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
/*
* This function is derived from the implementation of the zeta function in the Cephes Math Library.
* See note [3-Clause BSD License for the Cephes Math Library].
*/
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char zeta_name[] = "zeta";
void zeta_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars</*name=*/zeta_name,
/*return_dtype=*/ scalar_t,
/*f_inputs_dtype=*/ scalar_t>(iter, zeta_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {
return zeta<scalar_t, /*is_cuda=*/true>(x, q);
});
});
#endif //jiterator
}
} // namespace (anonymous)
REGISTER_DISPATCH(zeta_stub, &zeta_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
namespace at {
namespace native {
Tensor& relu_quantized_hip_(Tensor& self) {
const auto zero_point = self.q_zero_point();
AT_DISPATCH_QINT_TYPES(
self.scalar_type(), "qrelu_hip", [&]() {
auto iter = TensorIterator::unary_op(self, self);
gpu_kernel(iter, [zero_point] GPU_LAMBDA(scalar_t value) -> scalar_t {
return scalar_t(std::max<underlying_t>(value.val_, zero_point));
});
});
return self;
}
} // namespace native
} // namespace at
### |
#include <ATen/ATen.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at {
namespace native {
Tensor& relu_quantized_cuda_(Tensor& self) {
const auto zero_point = self.q_zero_point();
AT_DISPATCH_QINT_TYPES(
self.scalar_type(), "qrelu_cuda", [&]() {
auto iter = TensorIterator::unary_op(self, self);
gpu_kernel(iter, [zero_point] GPU_LAMBDA(scalar_t value) -> scalar_t {
return scalar_t(std::max<underlying_t>(value.val_, zero_point));
});
});
return self;
}
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/int_repr_native.h>
#endif
namespace at {
namespace native {
Tensor int_repr_quantized_hip(const Tensor& self) {
Tensor dst;
AT_DISPATCH_QINT_TYPES(self.scalar_type(), "int_repr_quantized_hip", [&]() {
dst = at::empty(
self.sizes(),
self.options().dtype(UNDERLYING_TYPE),
self.suggest_memory_format());
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t value) -> underlying_t {
return value.val_;
});
});
return dst;
}
} // namespace native
} // namespace at
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/int_repr_native.h>
#endif
namespace at {
namespace native {
Tensor int_repr_quantized_cuda(const Tensor& self) {
Tensor dst;
AT_DISPATCH_QINT_TYPES(self.scalar_type(), "int_repr_quantized_cuda", [&]() {
dst = at::empty(
self.sizes(),
self.options().dtype(UNDERLYING_TYPE),
self.suggest_memory_format());
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t value) -> underlying_t {
return value.val_;
});
});
return dst;
}
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/empty.h>
#endif
namespace at {
namespace native {
void assign_quantized_tensor_hip(
const Tensor& self, Tensor& dst) {
AT_DISPATCH_QINT_TYPES(
dst.scalar_type(), "assign_quantized_tensor_hip", [&]() {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(underlying_t value) -> scalar_t {
return scalar_t(value);
});
});
}
Tensor make_per_tensor_quantized_tensor_hip(
const Tensor& self,
double scale,
int64_t zero_point) {
Tensor dst = at::_empty_affine_quantized(
self.sizes(),
self.options().dtype(toQIntType(self.scalar_type())),
scale,
zero_point);
assign_quantized_tensor_hip(self, dst);
return dst;
}
Tensor make_per_channel_quantized_tensor_hip(
const Tensor& self,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis) {
Tensor dst = at::_empty_per_channel_affine_quantized(
self.sizes(),
scales,
zero_points,
axis,
self.options().dtype(toQIntType(self.scalar_type())));
assign_quantized_tensor_hip(self, dst);
return dst;
}
} // namespace native
} // namespace at
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/empty.h>
#endif
namespace at {
namespace native {
void assign_quantized_tensor_cuda(
const Tensor& self, Tensor& dst) {
AT_DISPATCH_QINT_TYPES(
dst.scalar_type(), "assign_quantized_tensor_cuda", [&]() {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(underlying_t value) -> scalar_t {
return scalar_t(value);
});
});
}
Tensor make_per_tensor_quantized_tensor_cuda(
const Tensor& self,
double scale,
int64_t zero_point) {
Tensor dst = at::_empty_affine_quantized(
self.sizes(),
self.options().dtype(toQIntType(self.scalar_type())),
scale,
zero_point);
assign_quantized_tensor_cuda(self, dst);
return dst;
}
Tensor make_per_channel_quantized_tensor_cuda(
const Tensor& self,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis) {
Tensor dst = at::_empty_per_channel_affine_quantized(
self.sizes(),
scales,
zero_points,
axis,
self.options().dtype(toQIntType(self.scalar_type())));
assign_quantized_tensor_cuda(self, dst);
return dst;
}
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/hip/detail\TensorInfo.cuh>
#include <c10/macros/Macros.h>
namespace at {
class Tensor;
}
namespace c10 {
class Scalar;
}
namespace at { namespace native {
void s_addmm_out_sparse_dense_hip_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense);
}} // namespace at::native
### |
#pragma once
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <c10/macros/Macros.h>
namespace at {
class Tensor;
}
namespace c10 {
class Scalar;
}
namespace at { namespace native {
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense);
}} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/hip\HIPGeneratorImpl.h>, which has a #pragma once.
// Stores RNG state values. Passed as a kernel argument.
// See Note [HIP Graph-safe RNG states].
//
// The raw definition lives in its own file so jit codegen can easily copy it.
namespace at {
struct PhiloxHipState {
PhiloxHipState() = default;
// Called if graph capture is not underway
PhiloxHipState(uint64_t seed,
uint64_t offset) {
seed_.val = seed;
offset_.val = offset;
}
// Called if graph capture is underway
PhiloxHipState(int64_t* seed,
int64_t* offset_extragraph,
uint32_t offset_intragraph) {
seed_.ptr = seed;
offset_.ptr = offset_extragraph;
offset_intragraph_ = offset_intragraph;
captured_ = true;
}
// Public members, directly accessible by at::cuda::philox::unpack.
// If we made them private with getters/setters, the getters/setters
// would have to be __device__, and we can't declare __device__ in ATen.
union Payload {
uint64_t val;
int64_t* ptr;
};
Payload seed_;
Payload offset_;
uint32_t offset_intragraph_ = 0;
bool captured_ = false;
};
} // namespace at
### |
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/cuda/CUDAGeneratorImpl.h>, which has a #pragma once.
// Stores RNG state values. Passed as a kernel argument.
// See Note [CUDA Graph-safe RNG states].
//
// The raw definition lives in its own file so jit codegen can easily copy it.
namespace at {
struct PhiloxCudaState {
PhiloxCudaState() = default;
// Called if graph capture is not underway
PhiloxCudaState(uint64_t seed,
uint64_t offset) {
seed_.val = seed;
offset_.val = offset;
}
// Called if graph capture is underway
PhiloxCudaState(int64_t* seed,
int64_t* offset_extragraph,
uint32_t offset_intragraph) {
seed_.ptr = seed;
offset_.ptr = offset_extragraph;
offset_intragraph_ = offset_intragraph;
captured_ = true;
}
// Public members, directly accessible by at::cuda::philox::unpack.
// If we made them private with getters/setters, the getters/setters
// would have to be __device__, and we can't declare __device__ in ATen.
union Payload {
uint64_t val;
int64_t* ptr;
};
Payload seed_;
Payload offset_;
uint32_t offset_intragraph_ = 0;
bool captured_ = false;
};
} // namespace at
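// NOTE (illustrative sketch, not part of the original ATen header): the struct
// above supports two construction paths, mirroring its two constructors:
//
//   at::PhiloxCudaState rng_eager(seed, offset);             // plain values (no graph capture)
//   at::PhiloxCudaState rng_capture(seed_ptr,                // device pointers used while a
//                                   offset_extragraph_ptr,   // CUDA graph capture is underway,
//                                   /*offset_intragraph=*/0);
//
// Consumers read the Payload union through at::cuda::philox::unpack (declared
// elsewhere), as noted in the comment above.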
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim128(FMHA_dgrad_params ¶ms, hipStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}));
}
### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim128(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim32(FMHA_dgrad_params ¶ms, hipStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}));
}
### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim32(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim64(FMHA_dgrad_params ¶ms, hipStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
auto dprops = at::cuda::getCurrentDeviceProperties();
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
if ((dprops->major == 8 && dprops->minor == 0) ||
(dprops->major == 9 && dprops->minor == 0)) {
// Don't share smem for K & V, and don't keep V in registers
// This speeds things up by 2-3% by avoiding register spills, but it
// uses more shared memory, which is fine on A100 and H100 but not other
// GPUs. For other GPUs, we keep V in registers.
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 8 && dprops->minor > 0) {
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 7 && dprops->minor == 5) {
using Kernel_traits =
FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}
}));
}
### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim64(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
auto dprops = at::cuda::getCurrentDeviceProperties();
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
if ((dprops->major == 8 && dprops->minor == 0) ||
(dprops->major == 9 && dprops->minor == 0)) {
// Don't share smem for K & V, and don't keep V in registers
// This speeds things up by 2-3% by avoiding register spills, but it
// uses more shared memory, which is fine on A100 and H100 but not other
// GPUs. For other GPUs, we keep V in registers.
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 8 && dprops->minor > 0) {
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 7 && dprops->minor == 5) {
using Kernel_traits =
FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim128(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}));
}
### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim128(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim64(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim64(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#pragma once
#include <ATen/CollapseDims.h>
namespace at {
namespace hip {
namespace detail {
#define MAX_TENSORINFO_DIMS 25
template <typename T, typename IndexType>
struct TensorInfo {
TensorInfo();
TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);
void reduceDim(int dim);
int collapseDims(const int excludeDim = -1);
__host__ __device__ inline bool isContiguous() const {
return (dims == 1 && strides[0] == 1);
}
T* data;
IndexType sizes[MAX_TENSORINFO_DIMS];
IndexType strides[MAX_TENSORINFO_DIMS];
int dims;
};
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo() {
data = nullptr;
dims = 0;
}
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]) {
data = p;
dims = dim;
TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, "HIP Tensors cannot have more than 25 dimensions");
for (int i = 0; i < dim; ++i) {
sizes[i] = sz[i];
strides[i] = st[i];
}
}
template <typename T, typename IndexType>
void
TensorInfo<T, IndexType>::reduceDim(int dim) {
TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1");
sizes[dim] = 1;
}
template <typename T, typename IndexType>
int
TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
auto result = at::collapse_dims(sizes, strides, dims, excludeDim);
dims = std::get<1>(result);
return std::get<0>(result);
}
template <typename T, typename IndexType, int Dims>
struct IndexToOffset {
static __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = Dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
template <typename T, typename IndexType>
struct IndexToOffset<T, IndexType, -1> {
static inline __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = info.dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
}
}
}
### |
#pragma once
#include <ATen/CollapseDims.h>
namespace at {
namespace cuda {
namespace detail {
#define MAX_TENSORINFO_DIMS 25
template <typename T, typename IndexType>
struct TensorInfo {
TensorInfo();
TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);
void reduceDim(int dim);
int collapseDims(const int excludeDim = -1);
__host__ __device__ inline bool isContiguous() const {
return (dims == 1 && strides[0] == 1);
}
T* data;
IndexType sizes[MAX_TENSORINFO_DIMS];
IndexType strides[MAX_TENSORINFO_DIMS];
int dims;
};
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo() {
data = nullptr;
dims = 0;
}
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]) {
data = p;
dims = dim;
TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, "CUDA Tensors cannot have more than 25 dimensions");
for (int i = 0; i < dim; ++i) {
sizes[i] = sz[i];
strides[i] = st[i];
}
}
template <typename T, typename IndexType>
void
TensorInfo<T, IndexType>::reduceDim(int dim) {
TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1");
sizes[dim] = 1;
}
template <typename T, typename IndexType>
int
TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
auto result = at::collapse_dims(sizes, strides, dims, excludeDim);
dims = std::get<1>(result);
return std::get<0>(result);
}
template <typename T, typename IndexType, int Dims>
struct IndexToOffset {
static __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = Dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
template <typename T, typename IndexType>
struct IndexToOffset<T, IndexType, -1> {
static inline __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = info.dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
}
}
}
### |
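A minimal host-side sketch (not part of the paired headers above) of the offset arithmetic that the dynamic-rank `IndexToOffset<T, IndexType, -1>::get` specialization performs; the sizes, strides, and `main` driver below are hypothetical.

#include <cstdint>
#include <cstdio>

// Same walk as IndexToOffset<..., -1>::get: peel coordinates off the linear
// index from the innermost dimension outward, accumulate coord * stride, and
// handle dimension 0 with whatever remains of the linear index.
static int64_t indexToOffset(int64_t linearId,
                             const int64_t* sizes,
                             const int64_t* strides,
                             int dims) {
  int64_t offset = 0;
  for (int i = dims - 1; i > 0; --i) {
    int64_t curDimIndex = linearId % sizes[i];
    offset += curDimIndex * strides[i];
    linearId /= sizes[i];
  }
  return offset + linearId * strides[0];
}

int main() {
  // Hypothetical 3x4 view stored with a row stride of 8 (a slice of a wider buffer).
  int64_t sizes[] = {3, 4};
  int64_t strides[] = {8, 1};
  // Linear index 6 is element (row 1, col 2): 1 * 8 + 2 * 1 = 10.
  std::printf("offset = %lld\n", (long long)indexToOffset(6, sizes, strides, 2));
  return 0;
}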
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
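The kernel bodies above execute only when compiled for sm80-sm100 and otherwise print the FATAL message; below is a hedged sketch of the complementary host-side capability check. Only the `cudaGetDeviceProperties` query is the real CUDA runtime API; the commented launch line is a hypothetical example.

#include <cstdio>
#include <cuda_runtime.h>

// Returns true when the device's compute capability falls in [sm80, sm100),
// mirroring the device-side __CUDA_ARCH__ range guard in the kernels above.
static bool deviceIsSm80ToSm100(int device) {
  cudaDeviceProp prop{};
  if (cudaGetDeviceProperties(&prop, device) != cudaSuccess) {
    return false;
  }
  int sm = prop.major * 10 + prop.minor;  // e.g. 80 for A100, 90 for H100
  return sm >= 80 && sm < 100;
}

int main() {
  if (!deviceIsSm80ToSm100(/*device=*/0)) {
    std::fprintf(stderr, "device 0 is not sm80-sm100; skip the _sm80 kernels\n");
    return 0;
  }
  // Hypothetical launch of one of the specializations defined above, e.g.:
  // fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80<<<grid, block, smem, stream>>>(p);
  return 0;
}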
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
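Every kernel above is annotated with `__launch_bounds__(kNumThreads, kMinBlocksPerSm)`; the hedged sketch below shows how that annotation constrains the host-side launch configuration. The kernel, the constants, and the `launch_example` helper are hypothetical stand-ins, not the CUTLASS attention kernels.

#include <cuda_runtime.h>

constexpr int kNumThreads = 128;    // hypothetical stand-in for Kernel::kNumThreads
constexpr int kMinBlocksPerSm = 2;  // hypothetical stand-in for Kernel::kMinBlocksPerSm

// __launch_bounds__ promises the compiler that a block never exceeds
// kNumThreads threads and asks it to keep register usage low enough to fit
// kMinBlocksPerSm resident blocks per SM.
__global__ void __launch_bounds__(kNumThreads, kMinBlocksPerSm)
example_kernel(float* out, int n) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    out[idx] = 1.0f;
  }
}

void launch_example(float* d_out, int n, cudaStream_t stream) {
  dim3 block(kNumThreads);  // must not exceed the declared bound
  dim3 grid((n + kNumThreads - 1) / kNumThreads);
  example_kernel<<<grid, block, 0, stream>>>(d_out, n);
}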
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/hip\HIPGraphsUtils.cuh>, which has a #pragma once.
namespace at {
namespace hip {
namespace philox {
// In-kernel call to retrieve philox seed and offset from a PhiloxHipState instance whether
// that instance was created with graph capture underway or not.
// See Note [HIP Graph-safe RNG states].
//
// We can't write a __device__ function in HIPGeneratorImpl.h, because it's in ATen.
// Also, whatever call unpacks PhiloxHipState in consumer kernels must be inlineable.
// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.
//
// The raw definition lives in its own file so jit codegen can easily copy it.
__device__ __forceinline__ std::tuple<uint64_t, uint64_t>
unpack(at::PhiloxHipState arg) {
if (arg.captured_) {
// static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long".
// *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.
// For most threads' reads it will hit in cache, so it shouldn't hurt performance.
return std::make_tuple(static_cast<uint64_t>(*arg.seed_.ptr), static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_));
} else {
return std::make_tuple(arg.seed_.val, arg.offset_.val);
}
}
} // namespace philox
} // namespace hip
} // namespace at
### |
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/cuda/CUDAGraphsUtils.cuh>, which has a #pragma once.
namespace at {
namespace cuda {
namespace philox {
// In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether
// that instance was created with graph capture underway or not.
// See Note [CUDA Graph-safe RNG states].
//
// We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen.
// Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable.
// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.
//
// The raw definition lives in its own file so jit codegen can easily copy it.
__device__ __forceinline__ std::tuple<uint64_t, uint64_t>
unpack(at::PhiloxCudaState arg) {
if (arg.captured_) {
// static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long".
// *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.
// For most threads' reads it will hit in cache, so it shouldn't hurt performance.
return std::make_tuple(static_cast<uint64_t>(*arg.seed_.ptr), static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_));
} else {
return std::make_tuple(arg.seed_.val, arg.offset_.val);
}
}
} // namespace philox
} // namespace cuda
} // namespace at
### |
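A hedged sketch of how a consumer kernel can use the `unpack` helper above to seed a per-thread Philox generator. The kernel and its output buffer are hypothetical; `curand_init` and `curand_uniform` are the standard cuRAND device API, with the seed, a per-thread subsequence, and the unpacked base offset passed in that order.

#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <tuple>
// Assumes the ATen headers that provide at::PhiloxCudaState and the unpack()
// helper above are on the include path; eager-mode code would include
// <ATen/cuda/CUDAGraphsUtils.cuh>, as noted at the top of the file.
#include <ATen/cuda/CUDAGraphsUtils.cuh>

// Hypothetical consumer kernel: draws one uniform float per element.
__global__ void sample_uniform(at::PhiloxCudaState philox_args, float* out, int n) {
  auto seeds = at::cuda::philox::unpack(philox_args);  // valid with or without graph capture
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) {
    return;
  }
  curandStatePhilox4_32_10_t state;
  // seed, per-thread subsequence, base offset
  curand_init(std::get<0>(seeds), idx, std::get<1>(seeds), &state);
  out[idx] = curand_uniform(&state);
}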
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <c10/hip/HIPException.h>
int safeDeviceCount() {
int count;
hipError_t err = hipGetDeviceCount(&count);
if (err == hipErrorInsufficientDriver || err == hipErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)
#define C10_DEFINE_TEST(a, b) \
__global__ void HIP##a##b(); \
TEST(a##Device, b) { \
SKIP_IF_NO_GPU(); \
hipDeviceSynchronize(); \
hipLaunchKernelGGL(( HIP##a##b), dim3(1), dim3(1), 0, 0, ); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
hipDeviceSynchronize(); \
ASSERT_EQ(hipGetLastError(), hipSuccess); \
} \
__global__ void HIP##a##b()
#include <c10/test/util/complex_math_test_common.h>
#undef C10_DEFINE_TEST
#undef C10_ASSERT_NEAR
#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)
#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)
#include <c10/test/util/complex_math_test_common.h>
### |
#include <gtest/gtest.h>
#include <c10/cuda/CUDAException.h>
int safeDeviceCount() {
int count;
cudaError_t err = cudaGetDeviceCount(&count);
if (err == cudaErrorInsufficientDriver || err == cudaErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)
#define C10_DEFINE_TEST(a, b) \
__global__ void CUDA##a##b(); \
TEST(a##Device, b) { \
SKIP_IF_NO_GPU(); \
cudaDeviceSynchronize(); \
CUDA##a##b<<<1, 1>>>(); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
cudaDeviceSynchronize(); \
ASSERT_EQ(cudaGetLastError(), cudaSuccess); \
} \
__global__ void CUDA##a##b()
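// The shared header below is written against C10_DEFINE_TEST/C10_ASSERT_NEAR, so
// this first include expands each test into a CUDA kernel plus a gtest case that
// launches it, while the second include (after the macros are redefined) runs the
// same test bodies on the host.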
#include <c10/test/util/complex_math_test_common.h>
#undef C10_DEFINE_TEST
#undef C10_ASSERT_NEAR
#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)
#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)
#include <c10/test/util/complex_math_test_common.h>
### |
// !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Optional.h>
#include <assert.h>
using namespace at;
// optional in cuda files
TEST(OptionalTest, OptionalTestHIP) {
if (!at::cuda::is_available()) return;
c10::optional<int64_t> trivially_destructible;
c10::optional<std::vector<int64_t>> non_trivially_destructible;
ASSERT_FALSE(trivially_destructible.has_value());
ASSERT_FALSE(non_trivially_destructible.has_value());
trivially_destructible = {5};
non_trivially_destructible = std::vector<int64_t>{5, 10};
ASSERT_TRUE(trivially_destructible.has_value());
ASSERT_TRUE(non_trivially_destructible.has_value());
}
### |
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Optional.h>
#include <assert.h>
using namespace at;
// optional in cuda files
TEST(OptionalTest, OptionalTestCUDA) {
if (!at::cuda::is_available()) return;
c10::optional<int64_t> trivially_destructible;
c10::optional<std::vector<int64_t>> non_trivially_destructible;
ASSERT_FALSE(trivially_destructible.has_value());
ASSERT_FALSE(non_trivially_destructible.has_value());
trivially_destructible = {5};
non_trivially_destructible = std::vector<int64_t>{5, 10};
ASSERT_TRUE(trivially_destructible.has_value());
ASSERT_TRUE(non_trivially_destructible.has_value());
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <assert.h>
using namespace at;
__global__ void test_tensor_packed_accessor_kernel(
PackedTensorAccessor64<float, 1, RestrictPtrTraits> resa,
PackedTensorAccessor64<float, 2, RestrictPtrTraits> t1a,
PackedTensorAccessor64<float, 1, RestrictPtrTraits> t2a) {
for (int64_t i = 0; i < resa.size(0); i++) {
float val = 0.0f;
for (int64_t j = 0; j < t1a.size(1); j++) {
val += t1a[i][j] * t2a[j];
}
resa[i] = val;
}
}
// test GenericPackedTensorAccessor and Tensor.generic_packed_accessor
TEST(PackedtensoraccessorTest, PackedtensoraccessorTestHIP) {
if (!at::cuda::is_available()) return;
manual_seed(123);
Tensor t1 = rand({4, 4}, HIP(kFloat));
Tensor t2 = rand({4}, HIP(kFloat));
Tensor res = empty({4}, HIP(kFloat));
auto t1a = t1.packed_accessor64<float, 2, RestrictPtrTraits>();
auto t2a = t2.packed_accessor64<float, 1, RestrictPtrTraits>();
auto resa = res.packed_accessor64<float, 1, RestrictPtrTraits>();
auto stream = at::hip::getCurrentHIPStream();
hipLaunchKernelGGL(( test_tensor_packed_accessor_kernel), dim3(1), dim3(1), 0, stream, resa, t1a, t2a);
C10_HIP_KERNEL_LAUNCH_CHECK();
ASSERT_TRUE(hipSuccess == hipDeviceSynchronize());
auto expected = mv(t1, t2);
ASSERT_TRUE(res.allclose(expected));
}
### |
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/core/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <assert.h>
using namespace at;
__global__ void test_tensor_packed_accessor_kernel(
PackedTensorAccessor64<float, 1, RestrictPtrTraits> resa,
PackedTensorAccessor64<float, 2, RestrictPtrTraits> t1a,
PackedTensorAccessor64<float, 1, RestrictPtrTraits> t2a) {
for (int64_t i = 0; i < resa.size(0); i++) {
float val = 0.0f;
for (int64_t j = 0; j < t1a.size(1); j++) {
val += t1a[i][j] * t2a[j];
}
resa[i] = val;
}
}
// test GenericPackedTensorAccessor and Tensor.generic_packed_accessor
TEST(PackedtensoraccessorTest, PackedtensoraccessorTestCUDA) {
if (!at::cuda::is_available()) return;
manual_seed(123);
Tensor t1 = rand({4, 4}, CUDA(kFloat));
Tensor t2 = rand({4}, CUDA(kFloat));
Tensor res = empty({4}, CUDA(kFloat));
auto t1a = t1.packed_accessor64<float, 2, RestrictPtrTraits>();
auto t2a = t2.packed_accessor64<float, 1, RestrictPtrTraits>();
auto resa = res.packed_accessor64<float, 1, RestrictPtrTraits>();
auto stream = at::cuda::getCurrentCUDAStream();
test_tensor_packed_accessor_kernel<<<1, 1, 0, stream>>>(resa, t1a, t2a);
C10_CUDA_KERNEL_LAUNCH_CHECK();
ASSERT_TRUE(cudaSuccess == cudaDeviceSynchronize());
auto expected = mv(t1, t2);
ASSERT_TRUE(res.allclose(expected));
}
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
// TODO: Remove once torchvision has been updated to use the ATen header
#include <ATen/hip/Atomic.cuh>
### |
#pragma once
// TODO: Remove once torchvision has been updated to use the ATen header
#include <ATen/cuda/Atomic.cuh>
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
// TODO: Remove this header
#include <ATen/hip/DeviceUtils.cuh>
### |
#pragma once
// TODO: Remove this header
#include <ATen/cuda/DeviceUtils.cuh>
### |
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <sstream>
#include <vector>
#include "c10/util/Flags.h"
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
using std::vector;
C10_DECLARE_int(caffe2_log_level);
int main(int argc, char** argv) {
caffe2::GlobalInit(&argc, &argv);
c10::SetUsageMessage(
"Inspects the GPUs on the current machine and prints out their details "
"provided by cuda.");
int gpu_count;
HIP_ENFORCE(hipGetDeviceCount(&gpu_count));
for (int i = 0; i < gpu_count; ++i) {
LOG(INFO) << "Querying device ID = " << i;
caffe2::DeviceQuery(i);
}
vector<vector<bool> > access_pattern;
CAFFE_ENFORCE(caffe2::GetHipPeerAccessPattern(&access_pattern));
std::stringstream sstream;
// Find topology
for (int i = 0; i < gpu_count; ++i) {
for (int j = 0; j < gpu_count; ++j) {
sstream << (access_pattern[i][j] ? "+" : "-") << " ";
}
sstream << std::endl;
}
LOG(INFO) << "Access pattern: " << std::endl << sstream.str();
return 0;
}
### |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <sstream>
#include <vector>
#include "c10/util/Flags.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
using std::vector;
C10_DECLARE_int(caffe2_log_level);
int main(int argc, char** argv) {
caffe2::GlobalInit(&argc, &argv);
c10::SetUsageMessage(
"Inspects the GPUs on the current machine and prints out their details "
"provided by cuda.");
int gpu_count;
CUDA_ENFORCE(cudaGetDeviceCount(&gpu_count));
for (int i = 0; i < gpu_count; ++i) {
LOG(INFO) << "Querying device ID = " << i;
caffe2::DeviceQuery(i);
}
vector<vector<bool> > access_pattern;
CAFFE_ENFORCE(caffe2::GetCudaPeerAccessPattern(&access_pattern));
std::stringstream sstream;
// Find topology
for (int i = 0; i < gpu_count; ++i) {
for (int j = 0; j < gpu_count; ++j) {
sstream << (access_pattern[i][j] ? "+" : "-") << " ";
}
sstream << std::endl;
}
LOG(INFO) << "Access pattern: " << std::endl << sstream.str();
return 0;
}
### |
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "caffe2/core/operator.h"
#include "caffe2/core/context.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/proto/caffe2_pb.h"
#define PRINT_SIZE(cls) \
std::cout << "Size of " #cls ": " << sizeof(cls) << " bytes." \
<< std::endl;
int main(int /* unused */, char** /* unused */) {
PRINT_SIZE(caffe2::Blob);
PRINT_SIZE(caffe2::Tensor);
PRINT_SIZE(caffe2::CPUContext);
PRINT_SIZE(caffe2::HIPContext);
PRINT_SIZE(caffe2::OperatorBase);
PRINT_SIZE(caffe2::OperatorDef);
PRINT_SIZE(caffe2::Operator<caffe2::CPUContext>);
PRINT_SIZE(caffe2::Operator<caffe2::HIPContext>);
PRINT_SIZE(caffe2::TypeMeta);
PRINT_SIZE(caffe2::Workspace);
return 0;
}
### |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "caffe2/core/operator.h"
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/proto/caffe2_pb.h"
#define PRINT_SIZE(cls) \
std::cout << "Size of " #cls ": " << sizeof(cls) << " bytes." \
<< std::endl;
int main(int /* unused */, char** /* unused */) {
PRINT_SIZE(caffe2::Blob);
PRINT_SIZE(caffe2::Tensor);
PRINT_SIZE(caffe2::CPUContext);
PRINT_SIZE(caffe2::CUDAContext);
PRINT_SIZE(caffe2::OperatorBase);
PRINT_SIZE(caffe2::OperatorDef);
PRINT_SIZE(caffe2::Operator<caffe2::CPUContext>);
PRINT_SIZE(caffe2::Operator<caffe2::CUDAContext>);
PRINT_SIZE(caffe2::TypeMeta);
PRINT_SIZE(caffe2::Workspace);
return 0;
}
### |
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
void did_not_fail_diagnostics() {
std::cerr
<< "c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = "
<< c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime
<< std::endl;
std::cerr
<< "c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time = "
<< c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time
<< std::endl;
std::cerr
<< "c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().do_all_devices_support_managed_memory = "
<< c10::hip::HIPKernelLaunchRegistry::get_singleton_ref()
.do_all_devices_support_managed_memory
<< std::endl;
}
__global__ void hip_always_fail_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
void hip_device_assertions_1_var_test() {
const auto stream = c10::hip::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
hip_always_fail_assertion_kernel, 1, 1, 0, stream, 1);
try {
c10::hip::device_synchronize();
did_not_fail_diagnostics();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr("HIP device-side assertion failures were found on GPU #0!"));
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str, HasSubstr(
"Name of kernel launched that led to failure = hip_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str, HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str, HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(HIPTest, hip_device_assertions_1_var_test) {
#ifdef TORCH_USE_HIP_DSA
c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
did_not_fail_diagnostics();
hip_device_assertions_1_var_test();
#else
GTEST_SKIP() << "HIP device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
void did_not_fail_diagnostics() {
std::cerr
<< "c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = "
<< c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime
<< std::endl;
std::cerr
<< "c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time = "
<< c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_compile_time
<< std::endl;
std::cerr
<< "c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().do_all_devices_support_managed_memory = "
<< c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref()
.do_all_devices_support_managed_memory
<< std::endl;
}
__global__ void cuda_always_fail_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
void cuda_device_assertions_1_var_test() {
const auto stream = c10::cuda::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
cuda_always_fail_assertion_kernel, 1, 1, 0, stream, 1);
try {
c10::cuda::device_synchronize();
did_not_fail_diagnostics();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr("CUDA device-side assertion failures were found on GPU #0!"));
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str, HasSubstr(
"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str, HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str, HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(CUDATest, cuda_device_assertions_1_var_test) {
#ifdef TORCH_USE_CUDA_DSA
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
did_not_fail_diagnostics();
cuda_device_assertions_1_var_test();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at::native {
template<typename scalar_t>
struct AbsFunctor {
__device__ __forceinline__ scalar_t operator() (const scalar_t a) const {
return std::abs(a);
}
};
CONSTEXPR_EXCEPT_WIN_HIP char abs_name[] = "abs_kernel";
void abs_kernel_hip(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto abs_string = jiterator_stringify(
template <typename T> T abs_kernel(T x) { return std::abs(x); });
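    // With the jiterator enabled, the stringified functor above is compiled at
    // runtime for the requested complex dtype instead of being instantiated for
    // every dtype at build time.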
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_hip", [&]() {
jitted_gpu_kernel<
/*name=*/abs_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, abs_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_hip", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, AbsFunctor<opmath_t>());
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(
ScalarType::Half,
ScalarType::BFloat16,
ScalarType::Bool,
iter.dtype(),
"abs_hip",
[&]() { gpu_kernel(iter, AbsFunctor<scalar_t>()); });
}
}
REGISTER_DISPATCH(abs_stub, &abs_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at::native {
template<typename scalar_t>
struct AbsFunctor {
__device__ __forceinline__ scalar_t operator() (const scalar_t a) const {
return std::abs(a);
}
};
CONSTEXPR_EXCEPT_WIN_CUDA char abs_name[] = "abs_kernel";
void abs_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto abs_string = jiterator_stringify(
template <typename T> T abs_kernel(T x) { return std::abs(x); });
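    // When the jiterator is enabled, this stringified functor is JIT-compiled at
    // runtime for the requested dtype rather than being built ahead of time.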
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/abs_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, abs_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "abs_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, AbsFunctor<opmath_t>());
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(
ScalarType::Half,
ScalarType::BFloat16,
ScalarType::Bool,
iter.dtype(),
"abs_cuda",
[&]() { gpu_kernel(iter, AbsFunctor<scalar_t>()); });
}
}
REGISTER_DISPATCH(abs_stub, &abs_kernel_cuda);
} // namespace at::native
### |
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
__global__ void hip_multiple_vars_always_fail_assertion_kernel(
const int a, const int b, const int c, const int d, TORCH_DSA_KERNEL_ARGS) {
int i = a + b + c + d;
if (i != 0) {
CUDA_KERNEL_ASSERT2(i == -i);
} else {
CUDA_KERNEL_ASSERT2(i == i + 1);
}
}
__global__ void hip_always_fail_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
void hip_device_assertions_catches_stream() {
const auto stream = c10::hip::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
hip_multiple_vars_always_fail_assertion_kernel, 1, 1, 0, stream, 1, 2, 3, 4
);
try {
c10::hip::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr("# of GPUs this process interacted with = 1"));
ASSERT_THAT(
err_str, HasSubstr("HIP device-side assertion failures were found on GPU #0!"));
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str, HasSubstr(
"Name of kernel launched that led to failure = hip_multiple_vars_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str, HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str, HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(HIPTest, hip_device_assertions_catches_stream) {
#ifdef TORCH_USE_HIP_DSA
c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
hip_device_assertions_catches_stream();
#else
GTEST_SKIP() << "HIP device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
__global__ void cuda_multiple_vars_always_fail_assertion_kernel(
const int a, const int b, const int c, const int d, TORCH_DSA_KERNEL_ARGS) {
int i = a + b + c + d;
if (i != 0) {
CUDA_KERNEL_ASSERT2(i == -i);
} else {
CUDA_KERNEL_ASSERT2(i == i + 1);
}
}
__global__ void cuda_always_fail_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
void cuda_device_assertions_catches_stream() {
const auto stream = c10::cuda::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
cuda_multiple_vars_always_fail_assertion_kernel, 1, 1, 0, stream, 1, 2, 3, 4
);
try {
c10::cuda::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr("# of GPUs this process interacted with = 1"));
ASSERT_THAT(
err_str, HasSubstr("CUDA device-side assertion failures were found on GPU #0!"));
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str, HasSubstr(
"Name of kernel launched that led to failure = cuda_multiple_vars_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str, HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str, HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(CUDATest, cuda_device_assertions_catches_stream) {
#ifdef TORCH_USE_CUDA_DSA
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
cuda_device_assertions_catches_stream();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
/**
* Device kernel that takes 2 arguments
* @param bad_thread represents the thread we want to trigger assertion on.
* @param bad_block represents the block we want to trigger assertion on.
 * This kernel triggers a device-side assertion only for the <<bad_block,
 * bad_thread>> pair; all other block/thread pairs are effectively no-ops.
*/
__global__ void hip_device_assertions_fail_on_thread_block_kernel(
const int bad_thread,
const int bad_block,
TORCH_DSA_KERNEL_ARGS) {
if (threadIdx.x == bad_thread && blockIdx.x == bad_block) {
CUDA_KERNEL_ASSERT2(false); // This comparison necessarily needs to fail
}
}
/**
 * TEST: Trigger a device-side assertion on exactly one thread of a
 * <<<1024,128>>> grid. The kernel takes two parameters that select the
 * particular block and thread that should assert; all other threads of the
 * kernel are effectively no-ops.
*/
void hip_device_assertions_catches_thread_and_block_and_device() {
const auto stream = c10::hip::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
hip_device_assertions_fail_on_thread_block_kernel,
1024, /* Blocks */
128, /* Threads */
0, /* Shared mem */
stream, /* Stream */
29, /* bad thread */
937 /* bad block */
);
try {
c10::hip::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [29,0,0]"));
ASSERT_THAT(
err_str, HasSubstr("Block ID that failed assertion = [937,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = hip_device_assertions_fail_on_thread_block_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str,
HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str,
HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(HIPTest, hip_device_assertions_catches_thread_and_block_and_device) {
#ifdef TORCH_USE_HIP_DSA
c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
hip_device_assertions_catches_thread_and_block_and_device();
#else
GTEST_SKIP() << "HIP device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
/**
* Device kernel that takes 2 arguments
* @param bad_thread represents the thread we want to trigger assertion on.
* @param bad_block represents the block we want to trigger assertion on.
 * This kernel triggers a device-side assertion only for the <<bad_block,
 * bad_thread>> pair; all other block/thread pairs are effectively no-ops.
*/
__global__ void cuda_device_assertions_fail_on_thread_block_kernel(
const int bad_thread,
const int bad_block,
TORCH_DSA_KERNEL_ARGS) {
if (threadIdx.x == bad_thread && blockIdx.x == bad_block) {
CUDA_KERNEL_ASSERT2(false); // This comparison necessarily needs to fail
}
}
/**
 * TEST: Trigger a device-side assertion on exactly one thread of a
 * <<<1024,128>>> grid. The kernel takes two parameters that select the
 * particular block and thread that should assert; all other threads of the
 * kernel are effectively no-ops.
*/
void cuda_device_assertions_catches_thread_and_block_and_device() {
const auto stream = c10::cuda::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
cuda_device_assertions_fail_on_thread_block_kernel,
1024, /* Blocks */
128, /* Threads */
0, /* Shared mem */
stream, /* Stream */
29, /* bad thread */
937 /* bad block */
);
try {
c10::cuda::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr("Thread ID that failed assertion = [29,0,0]"));
ASSERT_THAT(
err_str, HasSubstr("Block ID that failed assertion = [937,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = cuda_device_assertions_fail_on_thread_block_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str,
HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str,
HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(CUDATest, cuda_device_assertions_catches_thread_and_block_and_device) {
#ifdef TORCH_USE_CUDA_DSA
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
cuda_device_assertions_catches_thread_and_block_and_device();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_assertions_failure_str =
"Assertion failure " + std::to_string(C10_HIP_DSA_ASSERTION_COUNT - 1);
__global__ void hip_always_fail_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
__global__ void hip_always_succeed_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a == a);
}
#ifndef _MSC_VER
void hip_device_assertions_from_2_processes() {
const auto n1 = fork();
if (n1 == 0) {
TORCH_DSA_KERNEL_LAUNCH(
hip_always_fail_assertion_kernel, 1, 1, 0, c10::hip::getStreamFromPool(), 1);
try {
c10::hip::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr(
"1 HIP device-side assertion failures were found on GPU #0!"));
}
std::this_thread::sleep_for(std::chrono::milliseconds(3000));
} else {
std::this_thread::sleep_for(std::chrono::milliseconds(2000));
TORCH_DSA_KERNEL_LAUNCH(
hip_always_succeed_assertion_kernel, 1, 1, 0, c10::hip::getStreamFromPool(), 1);
try {
c10::hip::device_synchronize();
} catch (const c10::Error& err) {
ASSERT_TRUE(false);
}
exit(0);
}
}
TEST(HIPTest, hip_device_assertions_from_2_processes) {
#ifdef TORCH_USE_HIP_DSA
c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
hip_device_assertions_from_2_processes();
#else
GTEST_SKIP() << "HIP device-side assertions (DSA) was not enabled at compile time.";
#endif
}
#else
#endif
### |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_assertions_failure_str =
"Assertion failure " + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);
__global__ void cuda_always_fail_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
__global__ void cuda_always_succeed_assertion_kernel(
const int a, TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a == a);
}
#ifndef _MSC_VER
void cuda_device_assertions_from_2_processes() {
const auto n1 = fork();
if (n1 == 0) {
TORCH_DSA_KERNEL_LAUNCH(
cuda_always_fail_assertion_kernel, 1, 1, 0, c10::cuda::getStreamFromPool(), 1);
try {
c10::cuda::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(
err_str, HasSubstr(
"1 CUDA device-side assertion failures were found on GPU #0!"));
}
std::this_thread::sleep_for(std::chrono::milliseconds(3000));
} else {
std::this_thread::sleep_for(std::chrono::milliseconds(2000));
TORCH_DSA_KERNEL_LAUNCH(
cuda_always_succeed_assertion_kernel, 1, 1, 0, c10::cuda::getStreamFromPool(), 1);
try {
c10::cuda::device_synchronize();
} catch (const c10::Error& err) {
ASSERT_TRUE(false);
}
exit(0);
}
}
TEST(CUDATest, cuda_device_assertions_from_2_processes) {
#ifdef TORCH_USE_CUDA_DSA
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
cuda_device_assertions_from_2_processes();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
#else
#endif
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_assertions_failure_str =
"Assertion failure " + std::to_string(C10_HIP_DSA_ASSERTION_COUNT - 1);
/**
* Device kernel that takes a single integer parameter as argument and
* will always trigger a device side assertion.
*/
__global__ void hip_always_fail_assertion_kernel(
const int a,
TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
/**
 * TEST: Trigger device-side assertions from multiple blocks and threads using a
 * <<<10,128>>> launch (10 blocks of 128 threads each), issued concurrently from
 * several host threads.
*/
void hip_device_assertions_multiple_writes_from_blocks_and_threads() {
bool run_threads = false;
// Create a function to launch kernel that waits for a signal, to try to
// ensure everything is happening simultaneously
const auto launch_the_kernel = [&]() {
// Busy loop waiting for the signal to go
while (!run_threads) {
}
TORCH_DSA_KERNEL_LAUNCH(
hip_always_fail_assertion_kernel,
10, /* Blocks */
128, /* Threads */
0, /* Shared mem */
c10::hip::getCurrentHIPStream(), /* Stream */
1);
};
// Spin up a bunch of busy-looping threads
std::vector<std::thread> threads;
for (int i = 0; i < 10; i++) {
threads.emplace_back(launch_the_kernel);
}
// Paranoid - wait for all the threads to get setup
std::this_thread::sleep_for(std::chrono::milliseconds(100));
// Mash
run_threads = true;
// Clean-up
for (auto& x : threads) {
x.join();
}
try {
c10::hip::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = hip_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
}
}
TEST(HIPTest, hip_device_assertions_multiple_writes_from_blocks_and_threads) {
#ifdef TORCH_USE_HIP_DSA
c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
hip_device_assertions_multiple_writes_from_blocks_and_threads();
#else
GTEST_SKIP() << "HIP device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_assertions_failure_str =
"Assertion failure " + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);
/**
* Device kernel that takes a single integer parameter as argument and
* will always trigger a device side assertion.
*/
__global__ void cuda_always_fail_assertion_kernel(
const int a,
TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
/**
 * TEST: Trigger device-side assertions from multiple blocks and threads using a
 * <<<10,128>>> launch (10 blocks of 128 threads each), issued concurrently from
 * several host threads.
*/
void cuda_device_assertions_multiple_writes_from_blocks_and_threads() {
bool run_threads = false;
// Create a function to launch kernel that waits for a signal, to try to
// ensure everything is happening simultaneously
const auto launch_the_kernel = [&]() {
// Busy loop waiting for the signal to go
while (!run_threads) {
}
TORCH_DSA_KERNEL_LAUNCH(
cuda_always_fail_assertion_kernel,
10, /* Blocks */
128, /* Threads */
0, /* Shared mem */
c10::cuda::getCurrentCUDAStream(), /* Stream */
1);
};
// Spin up a bunch of busy-looping threads
std::vector<std::thread> threads;
for (int i = 0; i < 10; i++) {
threads.emplace_back(launch_the_kernel);
}
// Paranoid - wait for all the threads to get setup
std::this_thread::sleep_for(std::chrono::milliseconds(100));
// Mash
run_threads = true;
// Clean-up
for (auto& x : threads) {
x.join();
}
try {
c10::cuda::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
}
}
TEST(CUDATest, cuda_device_assertions_multiple_writes_from_blocks_and_threads) {
#ifdef TORCH_USE_CUDA_DSA
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
cuda_device_assertions_multiple_writes_from_blocks_and_threads();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/hip/HIPDeviceAssertion.h>
#include <c10/hip/HIPException.h>
#include <c10/hip/HIPFunctions.h>
#include <c10/hip/HIPStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_assertions_failure_str =
"Assertion failure " + std::to_string(C10_HIP_DSA_ASSERTION_COUNT - 1);
/**
* Device kernel that takes a single integer parameter as argument and
* will always trigger a device side assertion.
*/
__global__ void hip_always_fail_assertion_kernel(
const int a,
TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
/**
 * TEST: Trigger device-side assertions from a single block with multiple
 * threads, <<<1,128>>>. Once the very first thread asserts, the remaining
 * threads are left in a bad state, and the block ID reported for the failed
 * assertion is [0,0,0].
*/
void hip_device_assertions_multiple_writes_from_same_block() {
const auto stream = c10::hip::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
hip_always_fail_assertion_kernel,
1, /* Blocks */
128, /* Threads */
0, /* Shared mem */
stream, /* Stream */
1);
try {
c10::hip::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = hip_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str,
HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str,
HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(HIPTest, hip_device_assertions_multiple_writes_from_same_block) {
#ifdef TORCH_USE_HIP_DSA
c10::hip::HIPKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
hip_device_assertions_multiple_writes_from_same_block();
#else
GTEST_SKIP() << "HIP device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <c10/cuda/CUDADeviceAssertion.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>
using ::testing::HasSubstr;
const auto max_assertions_failure_str =
"Assertion failure " + std::to_string(C10_CUDA_DSA_ASSERTION_COUNT - 1);
/**
* Device kernel that takes a single integer parameter as argument and
* will always trigger a device side assertion.
*/
__global__ void cuda_always_fail_assertion_kernel(
const int a,
TORCH_DSA_KERNEL_ARGS) {
CUDA_KERNEL_ASSERT2(a != a);
}
/**
* TEST: Triggering device side assertion from single block and multiple threads
* <<<1,128>>>. Once the very first thread asserts all the other threads will
* basically be in bad state and the block id with failed assertion would be
* [0,0,0].
*/
void cuda_device_assertions_multiple_writes_from_same_block() {
const auto stream = c10::cuda::getStreamFromPool();
TORCH_DSA_KERNEL_LAUNCH(
cuda_always_fail_assertion_kernel,
1, /* Blocks */
128, /* Threads */
0, /* Shared mem */
stream, /* Stream */
1);
try {
c10::cuda::device_synchronize();
throw std::runtime_error("Test didn't fail, but should have.");
} catch (const c10::Error& err) {
const auto err_str = std::string(err.what());
ASSERT_THAT(err_str, HasSubstr(max_assertions_failure_str));
ASSERT_THAT(err_str, HasSubstr("Block ID that failed assertion = [0,0,0]"));
ASSERT_THAT(err_str, HasSubstr("Device that launched kernel = 0"));
ASSERT_THAT(
err_str,
HasSubstr(
"Name of kernel launched that led to failure = cuda_always_fail_assertion_kernel"));
ASSERT_THAT(
err_str, HasSubstr("File containing kernel launch = " __FILE__));
ASSERT_THAT(
err_str,
HasSubstr(
"Function containing kernel launch = " +
std::string(__FUNCTION__)));
ASSERT_THAT(
err_str,
HasSubstr(
"Stream kernel was launched on = " + std::to_string(stream.id())));
}
}
TEST(CUDATest, cuda_device_assertions_multiple_writes_from_same_block) {
#ifdef TORCH_USE_CUDA_DSA
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref().enabled_at_runtime = true;
cuda_device_assertions_multiple_writes_from_same_block();
#else
GTEST_SKIP() << "CUDA device-side assertions (DSA) was not enabled at compile time.";
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/contrib/aten/aten_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(ATen, ATenOp<HIPContext>);
template<>
at::Backend ATenOp<HIPContext>::backend() const {
return at::Backend::HIP;
}
}
### |
#include "caffe2/contrib/aten/aten_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(ATen, ATenOp<CUDAContext>);
template<>
at::Backend ATenOp<CUDAContext>::backend() const {
return at::Backend::CUDA;
}
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/contrib/gloo/broadcast_ops.h"
#include "caffe2/core/hip/context_gpu.h"
#include <gloo/hip_broadcast_one_to_all.h>
namespace caffe2 {
namespace gloo {
template <class Context>
void BroadcastOp<Context>::initializeAlgorithm() {
if (init_.template IsType<float>()) {
algorithm_.reset(new ::gloo::HipBroadcastOneToAll<float>(
init_.context, init_.template getOutputs<float>(), init_.size, root_));
} else if (init_.template IsType<long>()) {
algorithm_.reset(new ::gloo::HipBroadcastOneToAll<long>(
init_.context, init_.template getOutputs<long>(), init_.size, root_));
} else if (init_.template IsType<int>()) {
algorithm_.reset(new ::gloo::HipBroadcastOneToAll<int>(
init_.context, init_.template getOutputs<int>(), init_.size, root_));
} else if (init_.template IsType<at::Half>()) {
algorithm_.reset(new ::gloo::HipBroadcastOneToAll<::gloo::float16>(
init_.context,
init_.template getOutputs<::gloo::float16>(),
init_.size,
root_));
} else {
CAFFE_ENFORCE(false, "Unhandled type: ", init_.meta.name());
}
}
namespace {
REGISTER_HIP_OPERATOR_WITH_ENGINE(Broadcast, GLOO, BroadcastOp<HIPContext>);
} // namespace
} // namespace gloo
} // namespace caffe2
### |
#include "caffe2/contrib/gloo/broadcast_ops.h"
#include "caffe2/core/context_gpu.h"
#include <gloo/cuda_broadcast_one_to_all.h>
namespace caffe2 {
namespace gloo {
template <class Context>
void BroadcastOp<Context>::initializeAlgorithm() {
if (init_.template IsType<float>()) {
algorithm_.reset(new ::gloo::CudaBroadcastOneToAll<float>(
init_.context, init_.template getOutputs<float>(), init_.size, root_));
} else if (init_.template IsType<long>()) {
algorithm_.reset(new ::gloo::CudaBroadcastOneToAll<long>(
init_.context, init_.template getOutputs<long>(), init_.size, root_));
} else if (init_.template IsType<int>()) {
algorithm_.reset(new ::gloo::CudaBroadcastOneToAll<int>(
init_.context, init_.template getOutputs<int>(), init_.size, root_));
} else if (init_.template IsType<at::Half>()) {
algorithm_.reset(new ::gloo::CudaBroadcastOneToAll<::gloo::float16>(
init_.context,
init_.template getOutputs<::gloo::float16>(),
init_.size,
root_));
} else {
CAFFE_ENFORCE(false, "Unhandled type: ", init_.meta.name());
}
}
namespace {
REGISTER_CUDA_OPERATOR_WITH_ENGINE(Broadcast, GLOO, BroadcastOp<CUDAContext>);
} // namespace
} // namespace gloo
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/contrib/gloo/common_world_ops.h"
#include "caffe2/core/hip/context_gpu.h"
#include <gloo/hip.h>
#include <gloo/transport/tcp/device.h>
namespace caffe2 {
namespace gloo {
template <>
void CreateCommonWorld<HIPContext>::initializeForContext() {
static std::once_flag once;
std::call_once(once, [&]() {
// This is the first time we call Gloo code for a HIPContext.
// Share Caffe2 HIP mutex with Gloo.
::gloo::HipShared::setMutex(&HIPContext::mutex());
});
}
namespace {
REGISTER_HIP_OPERATOR_WITH_ENGINE(
CreateCommonWorld,
GLOO,
CreateCommonWorld<HIPContext>);
REGISTER_HIP_OPERATOR_WITH_ENGINE(
CloneCommonWorld,
GLOO,
CloneCommonWorld<HIPContext>);
} // namespace
} // namespace gloo
} // namespace caffe2
### |
#include "caffe2/contrib/gloo/common_world_ops.h"
#include "caffe2/core/context_gpu.h"
#include <gloo/cuda.h>
#include <gloo/transport/tcp/device.h>
namespace caffe2 {
namespace gloo {
template <>
void CreateCommonWorld<CUDAContext>::initializeForContext() {
static std::once_flag once;
std::call_once(once, [&]() {
// This is the first time we call Gloo code for a CUDAContext.
// Share Caffe2 CUDA mutex with Gloo.
::gloo::CudaShared::setMutex(&CUDAContext::mutex());
});
}
namespace {
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
CreateCommonWorld,
GLOO,
CreateCommonWorld<CUDAContext>);
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
CloneCommonWorld,
GLOO,
CloneCommonWorld<CUDAContext>);
} // namespace
} // namespace gloo
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <cstddef>
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/logging.h"
#include <rccl.h>
#include <unordered_map>
#define NCCL_VERSION_MIN(major, minor, patch) \
((NCCL_MAJOR > major) || \
((NCCL_MAJOR == major) && \
((NCCL_MINOR > minor) || \
((NCCL_MINOR == minor) && (NCCL_PATCH >= patch)))))
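// Illustrative use of the guard above (version numbers are just an example):
// #if NCCL_VERSION_MIN(2, 7, 0)
// ... code that relies on a newer RCCL/NCCL API ...
// #endif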
namespace caffe2 {
namespace nccl {
#define CAFFE_NCCL_CHECK(condition) \
do { \
ncclResult_t status = (condition); \
CAFFE_ENFORCE_EQ( \
status, \
ncclSuccess, \
" ", \
"Error at: ", \
__FILE__, \
__LINE__, \
": ", \
ncclGetErrorString(status)); \
} while (0)
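// Typical usage wraps every RCCL call, e.g. (illustrative call only):
// CAFFE_NCCL_CHECK(ncclCommInitAll(comms, num_devices, device_list));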
struct NCCLElement {
const TensorHIP* src{nullptr};
TensorHIP* dst{nullptr};
int device{0};
};
struct NCCLExecution {
int stream_gpu_id{0};
hipStream_t stream{nullptr};
std::vector<NCCLElement> elements;
size_t root{0};
};
// Called when the last NCCL op is destructed and all lazily created
// NCCLContext instances can safely be destroyed.
void destroyContexts();
template <typename T>
class NCCL {
public:
static void AllReduce(const NCCLExecution& ex);
static void Broadcast(const NCCLExecution& ex);
static void Reduce(const NCCLExecution& ex);
static void AllGather(const NCCLExecution& ex);
static void ReduceScatter(const NCCLExecution& ex);
};
} // namespace nccl
} // namespace caffe2
### |
#pragma once
#include <cstddef>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/logging.h"
#include <nccl.h>
#include <unordered_map>
#define NCCL_VERSION_MIN(major, minor, patch) \
((NCCL_MAJOR > major) || \
((NCCL_MAJOR == major) && \
((NCCL_MINOR > minor) || \
((NCCL_MINOR == minor) && (NCCL_PATCH >= patch)))))
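// Example guard built from the macro above (the version is illustrative only):
// #if NCCL_VERSION_MIN(2, 7, 0)
// ... code that needs a sufficiently new NCCL ...
// #endif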
namespace caffe2 {
namespace nccl {
#define CAFFE_NCCL_CHECK(condition) \
do { \
ncclResult_t status = (condition); \
CAFFE_ENFORCE_EQ( \
status, \
ncclSuccess, \
" ", \
"Error at: ", \
__FILE__, \
__LINE__, \
": ", \
ncclGetErrorString(status)); \
} while (0)
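// Typical usage wraps every NCCL call, for example (illustrative only):
// CAFFE_NCCL_CHECK(ncclAllReduce(send, recv, count, ncclFloat, ncclSum, comm, stream));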
struct NCCLElement {
const TensorCUDA* src{nullptr};
TensorCUDA* dst{nullptr};
int device{0};
};
struct NCCLExecution {
int stream_gpu_id{0};
cudaStream_t stream{nullptr};
std::vector<NCCLElement> elements;
size_t root{0};
};
// Called when the last NCCL op is destructed and all lazily created
// NCCLContext instances can safely be destroyed.
void destroyContexts();
template <typename T>
class NCCL {
public:
static void AllReduce(const NCCLExecution& ex);
static void Broadcast(const NCCLExecution& ex);
static void Reduce(const NCCLExecution& ex);
static void AllGather(const NCCLExecution& ex);
static void ReduceScatter(const NCCLExecution& ex);
};
} // namespace nccl
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
REGISTER_BLOB_DESERIALIZER(TensorHIP, TensorDeserializer);
}
} // namespace caffe2
### |
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
REGISTER_BLOB_DESERIALIZER(TensorCUDA, TensorDeserializer);
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void elu_kernel(
TensorIteratorBase& iter,
const Scalar& alpha,
const Scalar& scale,
const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"elu_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negcoef = alpha.to<opmath_t>() * scale.to<opmath_t>();
auto poscoef = scale.to<opmath_t>();
auto negiptcoef = input_scale.to<opmath_t>();
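      // ELU forward: y = scale * x for x > 0, and
      // y = scale * alpha * expm1(input_scale * x) for x <= 0; expm1 keeps
      // precision for inputs near zero.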
gpu_kernel(
iter,
[negcoef, poscoef, negiptcoef] GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
return aop > 0 ? aop * poscoef
: std::expm1(aop * negiptcoef) * negcoef;
});
});
}
void elu_backward_kernel(
TensorIteratorBase& iter,
const Scalar& alpha,
const Scalar& scale,
const Scalar& input_scale,
bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"elu_backward_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negcoef = alpha.to<opmath_t>() * scale.to<opmath_t>();
auto poscoef = scale.to<opmath_t>();
auto negiptcoef = input_scale.to<opmath_t>();
gpu_kernel(
iter,
[negcoef, poscoef, negiptcoef, is_result] GPU_LAMBDA(
scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
if (is_result) {
return bop <= 0 ? aop * negiptcoef * (bop + negcoef)
: aop * poscoef;
} else {
return bop <= 0
? aop * negiptcoef * negcoef * ::exp(bop * negiptcoef)
: aop * poscoef;
}
});
});
}
} // namespace
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void elu_kernel(
TensorIteratorBase& iter,
const Scalar& alpha,
const Scalar& scale,
const Scalar& input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"elu_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negcoef = alpha.to<opmath_t>() * scale.to<opmath_t>();
auto poscoef = scale.to<opmath_t>();
auto negiptcoef = input_scale.to<opmath_t>();
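      // ELU forward pass: scale * x when x > 0, otherwise
      // scale * alpha * expm1(input_scale * x); using expm1 avoids cancellation
      // for small inputs.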
gpu_kernel(
iter,
[negcoef, poscoef, negiptcoef] GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
return aop > 0 ? aop * poscoef
: std::expm1(aop * negiptcoef) * negcoef;
});
});
}
void elu_backward_kernel(
TensorIteratorBase& iter,
const Scalar& alpha,
const Scalar& scale,
const Scalar& input_scale,
bool is_result) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"elu_backward_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negcoef = alpha.to<opmath_t>() * scale.to<opmath_t>();
auto poscoef = scale.to<opmath_t>();
auto negiptcoef = input_scale.to<opmath_t>();
gpu_kernel(
iter,
[negcoef, poscoef, negiptcoef, is_result] GPU_LAMBDA(
scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
if (is_result) {
return bop <= 0 ? aop * negiptcoef * (bop + negcoef)
: aop * poscoef;
} else {
return bop <= 0
? aop * negiptcoef * negcoef * std::exp(bop * negiptcoef)
: aop * poscoef;
}
});
});
}
} // namespace
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include "caffe2/core/context.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/event.h"
namespace caffe2 {
TEST(EventHIPTest, EventBasics) {
if (!HasHipGPU())
return;
DeviceOption device_cpu;
device_cpu.set_device_type(PROTO_CPU);
DeviceOption device_hip;
device_hip.set_device_type(PROTO_HIP);
CPUContext context_cpu(device_cpu);
HIPContext context_hip(device_hip);
Event event_cpu(device_cpu);
Event event_hip(device_hip);
// CPU context and event interactions
context_cpu.Record(&event_cpu);
event_cpu.SetFinished();
event_cpu.Finish();
context_cpu.WaitEvent(event_cpu);
event_cpu.Reset();
event_cpu.Record(CPU, &context_cpu);
event_cpu.SetFinished();
event_cpu.Wait(CPU, &context_cpu);
// HIP context and event interactions
context_hip.SwitchToDevice();
context_hip.Record(&event_hip);
context_hip.WaitEvent(event_hip);
event_hip.Finish();
event_hip.Reset();
event_hip.Record(HIP, &context_hip);
event_hip.Wait(HIP, &context_hip);
// CPU context waiting for HIP event
context_cpu.WaitEvent(event_hip);
// HIP context waiting for CPU event
context_hip.WaitEvent(event_cpu);
}
} // namespace caffe2
### |
#include <gtest/gtest.h>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/event.h"
namespace caffe2 {
TEST(EventCUDATest, EventBasics) {
if (!HasCudaGPU())
return;
DeviceOption device_cpu;
device_cpu.set_device_type(PROTO_CPU);
DeviceOption device_cuda;
device_cuda.set_device_type(PROTO_CUDA);
CPUContext context_cpu(device_cpu);
CUDAContext context_cuda(device_cuda);
Event event_cpu(device_cpu);
Event event_cuda(device_cuda);
// CPU context and event interactions
context_cpu.Record(&event_cpu);
event_cpu.SetFinished();
event_cpu.Finish();
context_cpu.WaitEvent(event_cpu);
event_cpu.Reset();
event_cpu.Record(CPU, &context_cpu);
event_cpu.SetFinished();
event_cpu.Wait(CPU, &context_cpu);
// CUDA context and event interactions
context_cuda.SwitchToDevice();
context_cuda.Record(&event_cuda);
context_cuda.WaitEvent(event_cuda);
event_cuda.Finish();
event_cuda.Reset();
event_cuda.Record(CUDA, &context_cuda);
event_cuda.Wait(CUDA, &context_cuda);
// CPU context waiting for CUDA event
context_cpu.WaitEvent(event_cuda);
// CUDA context waiting for CPU event
context_cuda.WaitEvent(event_cpu);
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include <string>
#include <gtest/gtest.h>
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
class JustTest : public OperatorBase {
public:
using OperatorBase::OperatorBase;
bool Run(int /* unused */ /*stream_id*/) override {
return true;
}
virtual std::string type() {
return "BASE";
}
};
class JustTestHIP : public JustTest {
public:
using JustTest::JustTest;
bool Run(int /* unused */ /*stream_id*/) override {
return true;
}
std::string type() override {
return "HIP";
}
};
class JustTestMIOPEN : public JustTest {
public:
using JustTest::JustTest;
bool Run(int /* unused */ /*stream_id*/) override {
return true;
}
std::string type() override {
return "MIOPEN";
}
};
OPERATOR_SCHEMA(JustTest).NumInputs(0, 1).NumOutputs(0, 1);
REGISTER_HIP_OPERATOR(JustTest, JustTestHIP);
REGISTER_MIOPEN_OPERATOR(JustTest, JustTestMIOPEN);
TEST(EnginePrefTest, GPUDeviceDefaultPreferredEngines) {
if (!HasHipGPU())
return;
OperatorDef op_def;
Workspace ws;
op_def.mutable_device_option()->set_device_type(PROTO_HIP);
op_def.set_type("JustTest");
{
const auto op = CreateOperator(op_def, &ws);
EXPECT_NE(nullptr, op.get());
// MIOPEN should be taken as it's in the default global preferred engines
// list
EXPECT_EQ(static_cast<JustTest*>(op.get())->type(), "MIOPEN");
}
}
} // namespace caffe2
### |
#include <string>
#include <gtest/gtest.h>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
class JustTest : public OperatorBase {
public:
using OperatorBase::OperatorBase;
bool Run(int /* unused */ /*stream_id*/) override {
return true;
}
virtual std::string type() {
return "BASE";
}
};
class JustTestCUDA : public JustTest {
public:
using JustTest::JustTest;
bool Run(int /* unused */ /*stream_id*/) override {
return true;
}
std::string type() override {
return "CUDA";
}
};
class JustTestCUDNN : public JustTest {
public:
using JustTest::JustTest;
bool Run(int /* unused */ /*stream_id*/) override {
return true;
}
std::string type() override {
return "CUDNN";
}
};
OPERATOR_SCHEMA(JustTest).NumInputs(0, 1).NumOutputs(0, 1);
REGISTER_CUDA_OPERATOR(JustTest, JustTestCUDA);
REGISTER_CUDNN_OPERATOR(JustTest, JustTestCUDNN);
TEST(EnginePrefTest, GPUDeviceDefaultPreferredEngines) {
if (!HasCudaGPU())
return;
OperatorDef op_def;
Workspace ws;
op_def.mutable_device_option()->set_device_type(PROTO_CUDA);
op_def.set_type("JustTest");
{
const auto op = CreateOperator(op_def, &ws);
EXPECT_NE(nullptr, op.get());
// CUDNN should be taken as it's in the default global preferred engines
// list
EXPECT_EQ(static_cast<JustTest*>(op.get())->type(), "CUDNN");
}
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/db/create_db_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(CreateDB, CreateDBOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/db/create_db_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(CreateDB, CreateDBOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/distributed/file_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/hip/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_HIP_OPERATOR(
FileStoreHandlerCreate,
FileStoreHandlerCreateOp<HIPContext>);
#else
REGISTER_HIP_OPERATOR(
FileStoreHandlerCreate,
FileStoreHandlerCreateOp<HIPContext>);
#endif
} // namespace caffe2
### |
#include "caffe2/distributed/file_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_CUDA_OPERATOR(
FileStoreHandlerCreate,
FileStoreHandlerCreateOp<CUDAContext>);
#else
REGISTER_HIP_OPERATOR(
FileStoreHandlerCreate,
FileStoreHandlerCreateOp<HIPContext>);
#endif
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/distributed/redis_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/hip/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_HIP_OPERATOR(
RedisStoreHandlerCreate,
RedisStoreHandlerCreateOp<HIPContext>);
#else
REGISTER_HIP_OPERATOR(
RedisStoreHandlerCreate,
RedisStoreHandlerCreateOp<HIPContext>);
#endif
} // namespace caffe2
### |
#include "caffe2/distributed/redis_store_handler_op.h"
#if !defined(USE_ROCM)
#include <caffe2/core/context_gpu.h>
#else
#include <caffe2/core/hip/context_gpu.h>
#endif
namespace caffe2 {
#if !defined(USE_ROCM)
REGISTER_CUDA_OPERATOR(
RedisStoreHandlerCreate,
RedisStoreHandlerCreateOp<CUDAContext>);
#else
REGISTER_HIP_OPERATOR(
RedisStoreHandlerCreate,
RedisStoreHandlerCreateOp<HIPContext>);
#endif
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/image/image_input_op.h"
namespace caffe2 {
template <>
bool ImageInputOp<HIPContext>::ApplyTransformOnGPU(
const std::vector<std::int64_t>& dims,
const c10::Device& type) {
// GPU transform kernel allows explicitly setting output type
if (output_type_ == TensorProto_DataType_FLOAT) {
auto* image_output =
OperatorBase::OutputTensor(0, dims, at::dtype<float>().device(type));
TransformOnGPU<uint8_t, float, HIPContext>(
prefetched_image_on_device_,
image_output,
mean_gpu_,
std_gpu_,
&context_);
} else if (output_type_ == TensorProto_DataType_FLOAT16) {
auto* image_output =
OperatorBase::OutputTensor(0, dims, at::dtype<at::Half>().device(type));
TransformOnGPU<uint8_t, at::Half, HIPContext>(
prefetched_image_on_device_,
image_output,
mean_gpu_,
std_gpu_,
&context_);
} else {
return false;
}
return true;
}
REGISTER_HIP_OPERATOR(ImageInput, ImageInputOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/image/image_input_op.h"
namespace caffe2 {
template <>
bool ImageInputOp<CUDAContext>::ApplyTransformOnGPU(
const std::vector<std::int64_t>& dims,
const c10::Device& type) {
// GPU transform kernel allows explicitly setting output type
if (output_type_ == TensorProto_DataType_FLOAT) {
auto* image_output =
OperatorBase::OutputTensor(0, dims, at::dtype<float>().device(type));
TransformOnGPU<uint8_t, float, CUDAContext>(
prefetched_image_on_device_,
image_output,
mean_gpu_,
std_gpu_,
&context_);
} else if (output_type_ == TensorProto_DataType_FLOAT16) {
auto* image_output =
OperatorBase::OutputTensor(0, dims, at::dtype<at::Half>().device(type));
TransformOnGPU<uint8_t, at::Half, CUDAContext>(
prefetched_image_on_device_,
image_output,
mean_gpu_,
std_gpu_,
&context_);
} else {
return false;
}
return true;
}
REGISTER_CUDA_OPERATOR(ImageInput, ImageInputOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license; see accompanying LICENSE file
*
**/
namespace caffe2 {
namespace {
// input in (uint8, NHWC), output in (fp32 or fp16, NCHW)
template <typename In, typename Out>
__global__ void transform_kernel(
const int C,
const int H,
const int W,
const float* mean,
const float* std,
const In* in,
Out* out) {
const auto n = blockIdx.x;
const auto nStride = C*H*W;
// pointers to data for this image
const In *const input_ptr = &in[n*nStride];
Out *const output_ptr = &out[n*nStride];
// either read or write uncoalesced - try reading
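  // For each pixel: out[c][h][w] = (in[h][w][c] - mean[c]) * std[c],
  // i.e. an NHWC -> NCHW layout change plus per-channel normalization.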
for (int c=0; c < C; ++c) {
for (int h=threadIdx.y; h < H; h += blockDim.y) {
for (int w=threadIdx.x; w < W; w += blockDim.x) {
const int in_idx = c + C*w + C*W*h; // HWC
const int out_idx = c*H*W + h*W + w; // CHW
output_ptr[out_idx] = convert::To<float,Out>(
(convert::To<In,float>(input_ptr[in_idx])-mean[c]) * std[c]);
}
}
}
}
}
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context) {
const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
auto* input_data = X.template data<T_IN>();
auto* output_data = Y->template mutable_data<T_OUT>();
hipLaunchKernelGGL(( transform_kernel<
T_IN, T_OUT>), dim3(N), dim3(dim3(16, 16)), 0, context->hip_stream(),
C, H, W, mean.template data<float>(), std.template data<float>(),
input_data, output_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
};
template bool TransformOnGPU<uint8_t, float, HIPContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
HIPContext* context);
template bool TransformOnGPU<uint8_t, at::Half, HIPContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
HIPContext* context);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license; see accompanying LICENSE file
*
**/
namespace caffe2 {
namespace {
// input in (uint8, NHWC), output in (fp32 or fp16, NCHW)
template <typename In, typename Out>
__global__ void transform_kernel(
const int C,
const int H,
const int W,
const float* mean,
const float* std,
const In* in,
Out* out) {
const auto n = blockIdx.x;
const auto nStride = C*H*W;
// pointers to data for this image
const In *const input_ptr = &in[n*nStride];
Out *const output_ptr = &out[n*nStride];
// either read or write uncoalesced - try reading
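  // For each pixel: out[c][h][w] = (in[h][w][c] - mean[c]) * std[c],
  // i.e. an NHWC -> NCHW layout change plus per-channel normalization.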
for (int c=0; c < C; ++c) {
for (int h=threadIdx.y; h < H; h += blockDim.y) {
for (int w=threadIdx.x; w < W; w += blockDim.x) {
const int in_idx = c + C*w + C*W*h; // HWC
const int out_idx = c*H*W + h*W + w; // CHW
output_ptr[out_idx] = convert::To<float,Out>(
(convert::To<In,float>(input_ptr[in_idx])-mean[c]) * std[c]);
}
}
}
}
}
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context) {
const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
auto* input_data = X.template data<T_IN>();
auto* output_data = Y->template mutable_data<T_OUT>();
transform_kernel<
T_IN, T_OUT><<<N, dim3(16, 16), 0, context->cuda_stream()>>>(
C, H, W, mean.template data<float>(), std.template data<float>(),
input_data, output_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
};
template bool TransformOnGPU<uint8_t, float, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
template bool TransformOnGPU<uint8_t, at::Half, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_
#define CAFFE2_IMAGE_TRANSFORM_GPU_H_
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include "caffe2/core/context.h"
namespace caffe2 {
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context);
} // namespace caffe2
#endif
### |
#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_
#define CAFFE2_IMAGE_TRANSFORM_GPU_H_
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include "caffe2/core/context.h"
namespace caffe2 {
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context);
} // namespace caffe2
#endif
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/abs_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AbsGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
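    // d|x|/dx = sign(x); the gradient is defined as 0 at x == 0.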
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(X + i) == T(0)
? T(0)
: (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i));
#else
dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? dY[i] : -dY[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AbsGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( AbsGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Abs,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AbsFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
AbsGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AbsGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/abs_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AbsGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
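    // d|x|/dx = sign(x); the gradient is defined as 0 at x == 0.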
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(X + i) == T(0)
? T(0)
: (__ldg(X + i) > T(0) ? __ldg(dY + i) : -__ldg(dY + i));
#else
dX[i] = X[i] == T(0) ? T(0) : (X[i] > T(0) ? dY[i] : -dY[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AbsGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
AbsGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Abs,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AbsFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AbsGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AbsGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/accumulate_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Accumulate, AccumulateOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/accumulate_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Accumulate, AccumulateOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardshrink_hip",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
} // namespace
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardshrink_cuda",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
} // namespace
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/accuracy_op.h"
#include "caffe2/utils/hip/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
__global__ void AccuracyKernel(
const int N,
const int D,
const int top_k,
const float* Xdata,
const int* labelData,
float* accuracy) {
typedef hipcub::BlockReduce<int, CAFFE_HIP_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int correct = 0;
for (int row = blockIdx.x; row < N; row += gridDim.x) {
const int label = labelData[row];
const float label_pred = Xdata[row * D + label];
int ngt = 0;
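    // ngt = rank of the true label's score within this row (ties broken by
    // column index); the row counts as correct when that rank is <= top_k.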
for (int col = threadIdx.x; col < D; col += blockDim.x) {
const float pred = Xdata[row * D + col];
if (pred > label_pred || (pred == label_pred && col <= label)) {
++ngt;
}
}
ngt = BlockReduce(temp_storage).Sum(ngt);
if (ngt <= top_k) {
++correct;
}
__syncthreads();
}
if (threadIdx.x == 0) {
gpu_atomic_add(accuracy, static_cast<float>(correct));
}
}
__global__ void AccuracyDivideKernel(const int N, float* accuracy) {
*accuracy /= N;
}
} // namespace
template <>
bool AccuracyOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
CAFFE_ENFORCE_EQ(X.dim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
CAFFE_ENFORCE_EQ(label.dim(), 1);
CAFFE_ENFORCE_EQ(label.dim32(0), N);
auto* Y = Output(0, vector<int64_t>(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
math::Set<float, HIPContext>(1, 0, Ydata, &context_);
hipLaunchKernelGGL(( AccuracyKernel),
dim3(::min(CAFFE_MAXIMUM_NUM_BLOCKS, N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
N, D, top_k_, X.data<float>(), label.data<int>(), Ydata);
C10_HIP_KERNEL_LAUNCH_CHECK();
  // The final division runs as a trivial 1-block, 1-thread kernel. Not very
  // beautiful, but it keeps the result on the device without a host copy.
hipLaunchKernelGGL(( AccuracyDivideKernel), dim3(1), dim3(1), 0, context_.hip_stream(),
N, Ydata);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(Accuracy, AccuracyOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/accuracy_op.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
#include "caffe2/utils/cub_namespace.cuh"
#include <cub/block/block_reduce.cuh>
namespace caffe2 {
namespace {
__global__ void AccuracyKernel(
const int N,
const int D,
const int top_k,
const float* Xdata,
const int* labelData,
float* accuracy) {
typedef cub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int correct = 0;
for (int row = blockIdx.x; row < N; row += gridDim.x) {
const int label = labelData[row];
const float label_pred = Xdata[row * D + label];
int ngt = 0;
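    // ngt = rank of the true label's score within this row (ties broken by
    // column index); the row counts as correct when that rank is <= top_k.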
for (int col = threadIdx.x; col < D; col += blockDim.x) {
const float pred = Xdata[row * D + col];
if (pred > label_pred || (pred == label_pred && col <= label)) {
++ngt;
}
}
ngt = BlockReduce(temp_storage).Sum(ngt);
if (ngt <= top_k) {
++correct;
}
__syncthreads();
}
if (threadIdx.x == 0) {
gpu_atomic_add(accuracy, static_cast<float>(correct));
}
}
__global__ void AccuracyDivideKernel(const int N, float* accuracy) {
*accuracy /= N;
}
} // namespace
template <>
bool AccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
CAFFE_ENFORCE_EQ(X.dim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
CAFFE_ENFORCE_EQ(label.dim(), 1);
CAFFE_ENFORCE_EQ(label.dim32(0), N);
auto* Y = Output(0, vector<int64_t>(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
math::Set<float, CUDAContext>(1, 0, Ydata, &context_);
AccuracyKernel<<<
std::min(CAFFE_MAXIMUM_NUM_BLOCKS, N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, top_k_, X.data<float>(), label.data<int>(), Ydata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
  // The final division runs as a trivial 1-block, 1-thread kernel. Not very
  // beautiful, but it keeps the result on the device without a host copy.
AccuracyDivideKernel<<<1, 1, 0, context_.cuda_stream()>>>(
N, Ydata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Accuracy, AccuracyOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/acos_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AcosGradientHIPKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
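    // d/dx acos(x) = -1 / sqrt(1 - x^2), so dX = -dY * rsqrt(1 - x^2).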
#if __HIP_ARCH__ >= 350
dX[i] = -__ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));
#else
dX[i] = -dY[i] * rsqrtf(1.0f - X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AcosGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( AcosGradientHIPKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Acos,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AcosFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
AcosGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AcosGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/acos_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AcosGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
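    // d/dx acos(x) = -1 / sqrt(1 - x^2), so dX = -dY * rsqrt(1 - x^2).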
#if __CUDA_ARCH__ >= 350
dX[i] = -__ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));
#else
dX[i] = -dY[i] * rsqrtf(1.0f - X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AcosGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
AcosGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Acos,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AcosFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AcosGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AcosGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/alias_with_name.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(AliasWithName, AliasWithNameOp<HIPContext>);
} // namespace caffe2
C10_EXPORT_CAFFE2_OP_TO_C10_HIP(
AliasWithName,
caffe2::AliasWithNameOp<caffe2::HIPContext>);
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/alias_with_name.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(AliasWithName, AliasWithNameOp<CUDAContext>);
} // namespace caffe2
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(
AliasWithName,
caffe2::AliasWithNameOp<caffe2::CUDAContext>);
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/asin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AsinGradientHIPKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
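    // d/dx asin(x) = 1 / sqrt(1 - x^2), so dX = dY * rsqrt(1 - x^2).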
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] * rsqrtf(1.0f - X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AsinGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( AsinGradientHIPKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Asin,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AsinFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
AsinGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AsinGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/asin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void AsinGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
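    // d/dx asin(x) = 1 / sqrt(1 - x^2), so dX = dY * rsqrt(1 - x^2).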
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * rsqrtf(1.0f - __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] * rsqrtf(1.0f - X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AsinGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
AsinGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Asin,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AsinFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AsinGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AsinGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/assert_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Assert, AssertOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/assert_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Assert, AssertOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/async_net_barrier_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(AsyncNetBarrier, AsyncNetBarrierOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/async_net_barrier_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(AsyncNetBarrier, AsyncNetBarrierOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/atan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AtanGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
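    // d/dx atan(x) = 1 / (1 + x^2).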
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (T(1) + __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] / (T(1) + X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AtanGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( AtanGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Atan,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AtanFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
AtanGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
AtanGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/atan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
AtanGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
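    // d/dx atan(x) = 1 / (1 + x^2).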
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (T(1) + __ldg(X + i) * __ldg(X + i));
#else
dX[i] = dY[i] / (T(1) + X[i] * X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool AtanGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
AtanGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Atan,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AtanFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AtanGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
AtanGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/batch_matmul_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
template <>
bool BatchMatMulOp<HIPContext, DefaultEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_HIP_OPERATOR(BatchMatMul, BatchMatMulOp<HIPContext>);
#if !defined(USE_ROCM)
template <>
bool BatchMatMulOp<HIPContext, TensorCoreEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_HIP_OPERATOR_WITH_ENGINE(
BatchMatMul,
TENSORCORE,
BatchMatMulOp<HIPContext, TensorCoreEngine>);
#endif
} // namespace caffe2
### |
#include "caffe2/operators/batch_matmul_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <>
bool BatchMatMulOp<CUDAContext, DefaultEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR(BatchMatMul, BatchMatMulOp<CUDAContext>);
#if !defined(USE_ROCM)
template <>
bool BatchMatMulOp<CUDAContext, TensorCoreEngine>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR_WITH_ENGINE(
BatchMatMul,
TENSORCORE,
BatchMatMulOp<CUDAContext, TensorCoreEngine>);
#endif
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
const int M,
const float* bounds,
const float* X,
int32_t* out) {
HIP_1D_KERNEL_LOOP(i, N) {
int32_t low = -1, high = M;
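    // Binary search over the sorted boundaries: on exit, `high` is the index of
    // the first boundary >= X[i], which is the bucket assigned to X[i].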
while (high - low > 1) {
const int32_t median = low + (high - low) / 2;
if (bounds[median] < X[i]) {
low = median;
} else {
high = median;
}
}
out[i] = high;
}
}
template <>
bool BucketizeOp<HIPContext>::RunOnDevice() {
auto& input = Input(X);
CAFFE_ENFORCE_GE(input.dim(), 1);
auto N = input.numel();
auto* output = Output(INDICES, input.sizes(), at::dtype<int32_t>());
const auto* input_data = input.template data<float>();
auto* output_data = output->template mutable_data<int32_t>();
hipLaunchKernelGGL(( BucketizeOpKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
N,
boundaries_device_.numel(),
boundaries_device_.data<float>(),
input_data,
output_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
};
REGISTER_HIP_OPERATOR(Bucketize, BucketizeOp<HIPContext>);
} // namespace caffe2
using BucketizeHIP = caffe2::BucketizeOp<caffe2::HIPContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_HIP(
Bucketize,
BucketizeHIP);
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
const int M,
const float* bounds,
const float* X,
int32_t* out) {
CUDA_1D_KERNEL_LOOP(i, N) {
int32_t low = -1, high = M;
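    // Binary search over the sorted boundaries: on exit, `high` is the index of
    // the first boundary >= X[i], which is the bucket assigned to X[i].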
while (high - low > 1) {
const int32_t median = low + (high - low) / 2;
if (bounds[median] < X[i]) {
low = median;
} else {
high = median;
}
}
out[i] = high;
}
}
template <>
bool BucketizeOp<CUDAContext>::RunOnDevice() {
auto& input = Input(X);
CAFFE_ENFORCE_GE(input.dim(), 1);
auto N = input.numel();
auto* output = Output(INDICES, input.sizes(), at::dtype<int32_t>());
const auto* input_data = input.template data<float>();
auto* output_data = output->template mutable_data<int32_t>();
BucketizeOpKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
boundaries_device_.numel(),
boundaries_device_.data<float>(),
input_data,
output_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
};
REGISTER_CUDA_OPERATOR(Bucketize, BucketizeOp<CUDAContext>);
} // namespace caffe2
using BucketizeCUDA = caffe2::BucketizeOp<caffe2::CUDAContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(
Bucketize,
BucketizeCUDA);
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/cbrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CbrtGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
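    // With Y = cbrt(X), dX = dY / (3 * Y^2).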
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (__ldg(Y + i) * __ldg(Y + i) * T(3));
#else
dX[i] = dY[i] / (Y[i] * Y[i] * T(3));
#endif
}
}
} // namespace
template <>
template <typename T>
bool CbrtGradientFunctor<HIPContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* Y_dims */,
const T* dY,
const T* Y,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( CbrtGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Cbrt,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
CbrtFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
CbrtGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
CbrtGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/cbrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CbrtGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
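    // With Y = cbrt(X), dX = dY / (3 * Y^2).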
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (__ldg(Y + i) * __ldg(Y + i) * T(3));
#else
dX[i] = dY[i] / (Y[i] * Y[i] * T(3));
#endif
}
}
} // namespace
template <>
template <typename T>
bool CbrtGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* Y_dims */,
const T* dY,
const T* Y,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
CbrtGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Cbrt,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
CbrtFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
CbrtGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
CbrtGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void hardsigmoid_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardsigmoid_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t one_sixth(1.0f / 6.0f);
const opmath_t three(3.0f);
const opmath_t six(6.0f);
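        // hardsigmoid(x) = clamp(x + 3, 0, 6) / 6.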
gpu_kernel(
iter,
[zero, one_sixth, three, six] GPU_LAMBDA(
scalar_t self_val) -> scalar_t {
opmath_t x = static_cast<opmath_t>(self_val);
return ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardsigmoid_backward_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t three(3.0f);
const opmath_t neg_three(-3.0f);
const opmath_t one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth] GPU_LAMBDA(
scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
opmath_t grad_val = static_cast<opmath_t>(grad_val_);
opmath_t self_val = static_cast<opmath_t>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
} // namespace
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void hardsigmoid_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardsigmoid_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t one_sixth(1.0f / 6.0f);
const opmath_t three(3.0f);
const opmath_t six(6.0f);
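        // hardsigmoid(x) = clamp(x + 3, 0, 6) / 6.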
gpu_kernel(
iter,
[zero, one_sixth, three, six] GPU_LAMBDA(
scalar_t self_val) -> scalar_t {
opmath_t x = static_cast<opmath_t>(self_val);
return std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardsigmoid_backward_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t three(3.0f);
const opmath_t neg_three(-3.0f);
const opmath_t one_sixth(1.0f / 6.0f);
gpu_kernel(
iter,
[zero, three, neg_three, one_sixth] GPU_LAMBDA(
scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
opmath_t grad_val = static_cast<opmath_t>(grad_val_);
opmath_t self_val = static_cast<opmath_t>(self_val_);
return (self_val > neg_three && self_val < three)
? grad_val * one_sixth
: zero;
});
});
}
} // namespace
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
} // namespace at::native
### |