hip | cuda |
---|---|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/ceil_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T>
__global__ void CeilKernel(const int N, const T* X, T* Y) {
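// HIP_1D_KERNEL_LOOP is caffe2's grid-stride loop: each thread handles i, i + blockDim.x*gridDim.x, ..., so any launch size covers all N elements.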
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = ::ceil(X[i]);
}
}
template <>
bool CeilOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( CeilKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(Ceil, CeilOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/ceil_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T>
__global__ void CeilKernel(const int N, const T* X, T* Y) {
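// CUDA_1D_KERNEL_LOOP is caffe2's grid-stride loop: each thread handles i, i + blockDim.x*gridDim.x, ..., so any launch size covers all N elements.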
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = std::ceil(X[i]);
}
}
template <>
bool CeilOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
CeilKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Ceil, CeilOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/clip_op.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T hip_min(T x, T y);
template <typename T>
__device__ T hip_max(T x, T y);
template <>
__device__ float hip_min(float x, float y) { return fminf(x, y); }
template <>
__device__ float hip_max(float x, float y) { return fmaxf(x, y); }
// Disabled since we don't use it right now.
/*
template <>
__device__ double hip_min(double x, double y) { return fmin(x, y); }
template <>
__device__ double hip_max(double x, double y) { return fmax(x, y); }
*/
template <typename T>
__global__ void ClipKernel(const int N, const T minval, const T maxval,
const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = hip_min<T>(hip_max<T>(X[i], minval), maxval);
}
}
template <typename T>
__global__ void ClipGradientKernel(const int N, const T minval,
const T maxval, const T* Y,
const T* dY, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
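// Pass the gradient through only where the forward output was not clipped; the boolean comparison converts to 0 or 1.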
dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
}
}
} // namespace
template <>
bool ClipOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GE(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ClipKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
X.numel(), min_, max_, X.data<float>(), Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool ClipGradientOp<float, HIPContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GE(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( ClipGradientKernel),
dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
Y.numel(),
min_,
max_,
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(Clip, ClipOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(ClipGradient, ClipGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/clip_op.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T cuda_min(T x, T y);
template <typename T>
__device__ T cuda_max(T x, T y);
template <>
__device__ float cuda_min(float x, float y) { return fminf(x, y); }
template <>
__device__ float cuda_max(float x, float y) { return fmaxf(x, y); }
// Disabled since we don't use it right now.
/*
template <>
__device__ double cuda_min(double x, double y) { return fmin(x, y); }
template <>
__device__ double cuda_max(double x, double y) { return fmax(x, y); }
*/
template <typename T>
__global__ void ClipKernel(const int N, const T minval, const T maxval,
const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = cuda_min<T>(cuda_max<T>(X[i], minval), maxval);
}
}
template <typename T>
__global__ void ClipGradientKernel(const int N, const T minval,
const T maxval, const T* Y,
const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
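// Pass the gradient through only where the forward output was not clipped; the boolean comparison converts to 0 or 1.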
dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
}
}
} // namespace
template <>
bool ClipOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GE(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
ClipKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), min_, max_, X.data<float>(), Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GE(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
ClipGradientKernel<<<
CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
min_,
max_,
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Clip, ClipOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ClipGradient, ClipGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/no_default_engine_op.h"
namespace caffe2 {
// Communication operators do not have default engines.
REGISTER_HIP_OPERATOR(CreateCommonWorld, NoDefaultEngineOp<HIPContext>);
REGISTER_HIP_OPERATOR(CloneCommonWorld, NoDefaultEngineOp<HIPContext>);
REGISTER_HIP_OPERATOR(Broadcast, NoDefaultEngineOp<HIPContext>);
REGISTER_HIP_OPERATOR(Reduce, NoDefaultEngineOp<HIPContext>);
REGISTER_HIP_OPERATOR(Allgather, NoDefaultEngineOp<HIPContext>);
REGISTER_HIP_OPERATOR(Allreduce, NoDefaultEngineOp<HIPContext>);
REGISTER_HIP_OPERATOR(SendTensor, NoDefaultEngineOp<HIPContext>);
REGISTER_HIP_OPERATOR(ReceiveTensor, NoDefaultEngineOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/no_default_engine_op.h"
namespace caffe2 {
// Communication operators do not have default engines.
REGISTER_CUDA_OPERATOR(CreateCommonWorld, NoDefaultEngineOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(CloneCommonWorld, NoDefaultEngineOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Broadcast, NoDefaultEngineOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Reduce, NoDefaultEngineOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Allgather, NoDefaultEngineOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Allreduce, NoDefaultEngineOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(SendTensor, NoDefaultEngineOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(ReceiveTensor, NoDefaultEngineOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/concat_split_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Split, SplitOp<HIPContext>);
REGISTER_HIP_OPERATOR(Concat, ConcatOp<HIPContext>);
// Backward compatibility settings
REGISTER_HIP_OPERATOR(DepthSplit, SplitOp<HIPContext>);
REGISTER_HIP_OPERATOR(DepthConcat, ConcatOp<HIPContext>);
REGISTER_HIP_OPERATOR(SplitByLengths, SplitByLengthsOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/concat_split_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Split, SplitOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Concat, ConcatOp<CUDAContext>);
// Backward compatibility settings
REGISTER_CUDA_OPERATOR(DepthSplit, SplitOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(DepthConcat, ConcatOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(SplitByLengths, SplitByLengthsOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/conv_op.h"
#include "caffe2/operators/conv_op_impl.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Conv, ConvOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(ConvGradient, ConvGradientOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Conv1D, ConvOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Conv1DGradient, ConvGradientOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Conv2D, ConvOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Conv2DGradient, ConvGradientOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Conv3D, ConvOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Conv3DGradient, ConvGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/conv_op.h"
#include "caffe2/operators/conv_op_impl.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Conv, ConvOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ConvGradient, ConvGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Conv1D, ConvOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Conv1DGradient, ConvGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Conv2D, ConvOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Conv2DGradient, ConvGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Conv3D, ConvOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Conv3DGradient, ConvGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/conv_op_shared.h"
namespace caffe2 {
template <>
void createSharedBuffer<HIPContext>(Workspace* ws) {
auto* mutexPtr = ws->CreateBlob("__CAFFE2_SHARED_CONV_BUFFER_HIP_MUTEX__")
->GetMutable<std::unique_ptr<std::mutex>>();
mutexPtr->reset(new std::mutex());
ws->CreateBlob("__CAFFE2_SHARED_CONV_BUFFER_HIP__");
}
template <>
void runWithSharedBuffer<HIPContext>(
Workspace* ws,
std::function<void(Tensor* buffer)> f) {
auto* mutexBlob = ws->GetBlob("__CAFFE2_SHARED_CONV_BUFFER_HIP_MUTEX__");
CAFFE_ENFORCE(mutexBlob, "Must call createSharedBuffer() first");
auto* mutexPtr = mutexBlob->GetMutable<std::unique_ptr<std::mutex>>();
std::lock_guard<std::mutex> g(**mutexPtr);
auto* buffer = BlobGetMutableTensor(
ws->GetBlob("__CAFFE2_SHARED_CONV_BUFFER_HIP__"), HIP);
f(buffer);
}
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/conv_op_shared.h"
namespace caffe2 {
template <>
void createSharedBuffer<CUDAContext>(Workspace* ws) {
auto* mutexPtr = ws->CreateBlob("__CAFFE2_SHARED_CONV_BUFFER_CUDA_MUTEX__")
->GetMutable<std::unique_ptr<std::mutex>>();
mutexPtr->reset(new std::mutex());
ws->CreateBlob("__CAFFE2_SHARED_CONV_BUFFER_CUDA__");
}
template <>
void runWithSharedBuffer<CUDAContext>(
Workspace* ws,
std::function<void(Tensor* buffer)> f) {
auto* mutexBlob = ws->GetBlob("__CAFFE2_SHARED_CONV_BUFFER_CUDA_MUTEX__");
CAFFE_ENFORCE(mutexBlob, "Must call createSharedBuffer() first");
auto* mutexPtr = mutexBlob->GetMutable<std::unique_ptr<std::mutex>>();
std::lock_guard<std::mutex> g(**mutexPtr);
auto* buffer = BlobGetMutableTensor(
ws->GetBlob("__CAFFE2_SHARED_CONV_BUFFER_CUDA__"), CUDA);
f(buffer);
}
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/conv_transpose_op.h"
#include "caffe2/operators/conv_transpose_op_impl.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(ConvTranspose, ConvTransposeOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
ConvTransposeGradient,
ConvTransposeGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/conv_transpose_op.h"
#include "caffe2/operators/conv_transpose_op_impl.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(ConvTranspose, ConvTransposeOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ConvTransposeGradient,
ConvTransposeGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/copy_op.h"
namespace caffe2 {
template <>
class CopyOnDeviceLikeOp<HIPContext, HIPContext, HIPContext>
: public Operator<HIPContext> {
public:
template <class... Args>
explicit CopyOnDeviceLikeOp(Args&&... args)
: Operator<HIPContext>(std::forward<Args>(args)...) {}
USE_OPERATOR_FUNCTIONS(HIPContext);
bool RunOnDevice() override {
auto& input = Input(0);
auto* output = OperatorBase::Output<Tensor>(0, HIP);
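// Input(1) only selects the destination device: the temporary context is created on the GPU that owns its memory; the tensor's contents are never read.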
HIPContext context(GetGPUIDForPointer(Input(1).raw_data()));
output->ResizeLike(input);
context.template CopyItems<HIPContext, HIPContext>(
input.meta(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.meta()));
return true;
}
};
// From CPU, copy it to whatever the current context
REGISTER_HIP_OPERATOR(
CopyFromCPUInput,
CopyOp<HIPContext, HIPContext, CPUContext>);
// CopyGPUToCPU and CopyCPUToGPU should both be carried out in a cuda context,
// since gpu code will be involved.
REGISTER_HIP_OPERATOR(
CopyGPUToCPU,
CopyOp<HIPContext, CPUContext, HIPContext>);
REGISTER_HIP_OPERATOR(
CopyCPUToGPU,
CopyOp<HIPContext, HIPContext, CPUContext>);
// If we only specify Copy, we assume that it is a gpu to gpu copy - maybe
// involving different GPUs.
REGISTER_HIP_OPERATOR(Copy, CopyOp<HIPContext, HIPContext, HIPContext>);
REGISTER_HIP_OPERATOR(
CopyOnDeviceLike,
CopyOnDeviceLikeOp<HIPContext, HIPContext, HIPContext>);
} // namespace caffe2
using CopyGPUToCPU_HIP = caffe2::
CopyOp<caffe2::HIPContext, caffe2::CPUContext, caffe2::HIPContext>;
using CopyCPUToGPU_HIP = caffe2::
CopyOp<caffe2::HIPContext, caffe2::HIPContext, caffe2::CPUContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_HIP(CopyGPUToCPU, CopyGPUToCPU_HIP);
C10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY(CopyCPUToGPU, CopyCPUToGPU_HIP);
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/copy_op.h"
namespace caffe2 {
template <>
class CopyOnDeviceLikeOp<CUDAContext, CUDAContext, CUDAContext>
: public Operator<CUDAContext> {
public:
template <class... Args>
explicit CopyOnDeviceLikeOp(Args&&... args)
: Operator<CUDAContext>(std::forward<Args>(args)...) {}
USE_OPERATOR_FUNCTIONS(CUDAContext);
bool RunOnDevice() override {
auto& input = Input(0);
auto* output = OperatorBase::Output<Tensor>(0, CUDA);
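// Input(1) only selects the destination device: the temporary context is created on the GPU that owns its memory; the tensor's contents are never read.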
CUDAContext context(GetGPUIDForPointer(Input(1).raw_data()));
output->ResizeLike(input);
context.template CopyItems<CUDAContext, CUDAContext>(
input.meta(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.meta()));
return true;
}
};
// From CPU, copy it to whatever the current context
REGISTER_CUDA_OPERATOR(
CopyFromCPUInput,
CopyOp<CUDAContext, CUDAContext, CPUContext>);
// CopyGPUToCPU and CopyCPUToGPU should both be carried out in a cuda context,
// since gpu code will be involved.
REGISTER_CUDA_OPERATOR(
CopyGPUToCPU,
CopyOp<CUDAContext, CPUContext, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CopyCPUToGPU,
CopyOp<CUDAContext, CUDAContext, CPUContext>);
// If we only specify Copy, we assume that it is a gpu to gpu copy - maybe
// involving different GPUs.
REGISTER_CUDA_OPERATOR(Copy, CopyOp<CUDAContext, CUDAContext, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CopyOnDeviceLike,
CopyOnDeviceLikeOp<CUDAContext, CUDAContext, CUDAContext>);
} // namespace caffe2
using CopyGPUToCPU_CUDA = caffe2::
CopyOp<caffe2::CUDAContext, caffe2::CPUContext, caffe2::CUDAContext>;
using CopyCPUToGPU_CUDA = caffe2::
CopyOp<caffe2::CUDAContext, caffe2::CUDAContext, caffe2::CPUContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(CopyGPUToCPU, CopyGPUToCPU_CUDA);
C10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY(CopyCPUToGPU, CopyCPUToGPU_CUDA);
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/cosh_op.h"
#include <c10/util/accumulate.h>
#include "caffe2/core/hip/context_gpu.h"
#include <algorithm>
#include <functional>
namespace caffe2 {
namespace {
__global__ void CoshGradientHIPKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * sinhf(__ldg(X + i));
#else
dX[i] = dY[i] * sinhf(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool CoshGradientFunctor<HIPContext>::Forward(
const std::vector<int>& /* dY_dims */,
const std::vector<int>& X_dims,
const T* dY,
const T* X,
T* dX,
HIPContext* context) const {
const auto size = c10::multiply_integers(X_dims.cbegin(), X_dims.cend());
hipLaunchKernelGGL(( CoshGradientHIPKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Cosh,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
CoshFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
CoshGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
CoshGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/cosh_op.h"
#include <c10/util/accumulate.h>
#include "caffe2/core/context_gpu.h"
#include <algorithm>
#include <functional>
namespace caffe2 {
namespace {
__global__ void CoshGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * sinhf(__ldg(X + i));
#else
dX[i] = dY[i] * sinhf(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool CoshGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& /* dY_dims */,
const std::vector<int>& X_dims,
const T* dY,
const T* X,
T* dX,
CUDAContext* context) const {
const auto size = c10::multiply_integers(X_dims.cbegin(), X_dims.cend());
CoshGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Cosh,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
CoshFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
CoshGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
CoshGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/cosine_embedding_criterion_op.h"
namespace caffe2 {
namespace {
__global__ void CECKernel(
const int N, const float* S, const int* Y, const float margin,
float* output) {
HIP_1D_KERNEL_LOOP(i, N) {
output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin);
}
}
__global__ void CECGradientKernel(
const int N, const float* S, const int* Y, const float* dOutput,
const float margin, float* dS) {
HIP_1D_KERNEL_LOOP(i, N) {
dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast<float>(S[i] >= margin));
}
}
} // namespace
template <>
bool CosineEmbeddingCriterionOp<HIPContext>::RunOnDevice() {
auto& S = Input(0);
auto& Y = Input(1);
CAFFE_ENFORCE(S.numel() == Y.numel(),
"The embedding and label should have the same size.");
auto* output = Output(0, S.sizes(), at::dtype<float>());
const float* Sdata = S.data<float>();
const int* Ydata = Y.data<int>();
float* output_data = output->template mutable_data<float>();
hipLaunchKernelGGL(( CECKernel), dim3(CAFFE_GET_BLOCKS(S.numel())), dim3(CAFFE_HIP_NUM_THREADS),
0, context_.hip_stream(),
S.numel(), Sdata, Ydata, margin_, output_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool CosineEmbeddingCriterionGradientOp<HIPContext>::RunOnDevice() {
auto& S = Input(0);
auto& Y = Input(1);
auto& dOutput = Input(2);
auto* dS = Output(0, S.sizes(), at::dtype<float>());
const float* Sdata = S.data<float>();
const int* Ydata = Y.data<int>();
const float* dOutput_data = dOutput.data<float>();
float* dSdata = dS->template mutable_data<float>();
hipLaunchKernelGGL(( CECGradientKernel), dim3(CAFFE_GET_BLOCKS(S.numel())), dim3(CAFFE_HIP_NUM_THREADS),
0, context_.hip_stream(),
S.numel(), Sdata, Ydata, dOutput_data, margin_, dSdata);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
CosineEmbeddingCriterion,
CosineEmbeddingCriterionOp<HIPContext>);
REGISTER_HIP_OPERATOR(
CosineEmbeddingCriterionGradient,
CosineEmbeddingCriterionGradientOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/cosine_embedding_criterion_op.h"
namespace caffe2 {
namespace {
__global__ void CECKernel(
const int N, const float* S, const int* Y, const float margin,
float* output) {
CUDA_1D_KERNEL_LOOP(i, N) {
output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin);
}
}
__global__ void CECGradientKernel(
const int N, const float* S, const int* Y, const float* dOutput,
const float margin, float* dS) {
CUDA_1D_KERNEL_LOOP(i, N) {
dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast<float>(S[i] >= margin));
}
}
} // namespace
template <>
bool CosineEmbeddingCriterionOp<CUDAContext>::RunOnDevice() {
auto& S = Input(0);
auto& Y = Input(1);
CAFFE_ENFORCE(S.numel() == Y.numel(),
"The embedding and label should have the same size.");
auto* output = Output(0, S.sizes(), at::dtype<float>());
const float* Sdata = S.data<float>();
const int* Ydata = Y.data<int>();
float* output_data = output->template mutable_data<float>();
CECKernel<<<CAFFE_GET_BLOCKS(S.numel()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
S.numel(), Sdata, Ydata, margin_, output_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool CosineEmbeddingCriterionGradientOp<CUDAContext>::RunOnDevice() {
auto& S = Input(0);
auto& Y = Input(1);
auto& dOutput = Input(2);
auto* dS = Output(0, S.sizes(), at::dtype<float>());
const float* Sdata = S.data<float>();
const int* Ydata = Y.data<int>();
const float* dOutput_data = dOutput.data<float>();
float* dSdata = dS->template mutable_data<float>();
CECGradientKernel<<<CAFFE_GET_BLOCKS(S.numel()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
S.numel(), Sdata, Ydata, dOutput_data, margin_, dSdata);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
CosineEmbeddingCriterion,
CosineEmbeddingCriterionOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(
CosineEmbeddingCriterionGradient,
CosineEmbeddingCriterionGradientOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_hip", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t one_sixth(1.0f / 6.0f);
const opmath_t three(3.0f);
const opmath_t six(6.0f);
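// hardswish(x) = x * min(max(x + 3, 0), 6) / 6, computed in opmath_t so Half/BFloat16 inputs are evaluated at float precision.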
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
opmath_t x = static_cast<opmath_t>(self_val);
return x * ::min(::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_hip", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t three(3.0f);
const opmath_t neg_three(-3.0f);
const opmath_t one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
opmath_t grad_val = static_cast<opmath_t>(grad_val_);
opmath_t self_val = static_cast<opmath_t>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
} // namespace
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void hardswish_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t one_sixth(1.0f / 6.0f);
const opmath_t three(3.0f);
const opmath_t six(6.0f);
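// hardswish(x) = x * min(max(x + 3, 0), 6) / 6, computed in opmath_t so Half/BFloat16 inputs are evaluated at float precision.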
gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
opmath_t x = static_cast<opmath_t>(self_val);
return x * std::min(std::max(x + three, zero), six) * one_sixth;
});
});
}
void hardswish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t zero(0.0f);
const opmath_t three(3.0f);
const opmath_t neg_three(-3.0f);
const opmath_t one_half(0.5f);
gpu_kernel(
iter,
[zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
opmath_t grad_val = static_cast<opmath_t>(grad_val_);
opmath_t self_val = static_cast<opmath_t>(self_val_);
if (self_val < neg_three) {
return zero;
} else if (self_val <= three) {
return grad_val * ((self_val / three) + one_half);
} else {
return grad_val;
}
});
});
}
} // namespace
REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/hip/HIPConfig.h>
#include <ATen/hip/cub.cuh>
namespace at {
namespace hip {
namespace cub {
template <typename key_t>
void radix_sort_keys(
const key_t* keys_in,
key_t* keys_out,
int64_t n,
bool descending,
int64_t begin_bit,
int64_t end_bit) {
TORCH_CHECK(
n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::hip_type<key_t>::type;
const key_t_* keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_* keys_out_ = reinterpret_cast<key_t_*>(keys_out);
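// CUB_WRAPPER does the usual two-phase hipcub call: first query the temporary-storage size, then allocate scratch memory and run the sort on the current stream.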
if (descending) {
CUB_WRAPPER(
NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortKeysDescending,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::hip::getCurrentHIPStream());
} else {
CUB_WRAPPER(
NO_ROCM(at_hip_detail)::hipcub::DeviceRadixSort::SortKeys,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::hip::getCurrentHIPStream());
}
}
#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \
template void radix_sort_keys( \
const scalar_t* keys_in, \
scalar_t* keys_out, \
int64_t n, \
bool descending, \
int64_t begin_bit, \
int64_t end_bit);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
} // namespace cub
} // namespace hip
} // namespace at
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/cuda/CUDAConfig.h>
#include <ATen/cuda/cub.cuh>
namespace at {
namespace cuda {
namespace cub {
template <typename key_t>
void radix_sort_keys(
const key_t* keys_in,
key_t* keys_out,
int64_t n,
bool descending,
int64_t begin_bit,
int64_t end_bit) {
TORCH_CHECK(
n <= std::numeric_limits<int>::max(),
"cub sort does not support sorting more than INT_MAX elements");
using key_t_ = typename detail::cuda_type<key_t>::type;
const key_t_* keys_in_ = reinterpret_cast<const key_t_*>(keys_in);
key_t_* keys_out_ = reinterpret_cast<key_t_*>(keys_out);
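// CUB_WRAPPER does the usual two-phase cub call: first query the temporary-storage size, then allocate scratch memory and run the sort on the current stream.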
if (descending) {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeysDescending,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::cuda::getCurrentCUDAStream());
} else {
CUB_WRAPPER(
NO_ROCM(at_cuda_detail)::cub::DeviceRadixSort::SortKeys,
keys_in_,
keys_out_,
n,
begin_bit,
end_bit,
c10::cuda::getCurrentCUDAStream());
}
}
#define AT_INSTATIATE_CUB_TEMPLATES(scalar_t, ScalarType) \
template void radix_sort_keys( \
const scalar_t* keys_in, \
scalar_t* keys_out, \
int64_t n, \
bool descending, \
int64_t begin_bit, \
int64_t end_bit);
AT_FORALL_SCALAR_TYPES_AND2(Bool, Half, AT_INSTATIATE_CUB_TEMPLATES)
} // namespace cub
} // namespace cuda
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/cos_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CosGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = -__ldg(dY + i) * sin(__ldg(X + i));
#else
dX[i] = -dY[i] * sin(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool CosGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( CosGradientHIPKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Cos,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
CosFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
CosGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
CosGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/cos_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CosGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = -__ldg(dY + i) * sin(__ldg(X + i));
#else
dX[i] = -dY[i] * sin(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool CosGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
CosGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Cos,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
CosFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
CosGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
CosGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/counter_ops.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(CreateCounter, CreateCounterOp<int64_t, HIPContext>);
REGISTER_HIP_OPERATOR(ResetCounter, ResetCounterOp<int64_t, HIPContext>);
REGISTER_HIP_OPERATOR(CountDown, CountDownOp<int64_t, HIPContext>);
REGISTER_HIP_OPERATOR(
CheckCounterDone,
CheckCounterDoneOp<int64_t, HIPContext>);
REGISTER_HIP_OPERATOR(CountUp, CountUpOp<int64_t, HIPContext>);
REGISTER_HIP_OPERATOR(RetrieveCount, RetrieveCountOp<int64_t, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/counter_ops.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(CreateCounter, CreateCounterOp<int64_t, CUDAContext>);
REGISTER_CUDA_OPERATOR(ResetCounter, ResetCounterOp<int64_t, CUDAContext>);
REGISTER_CUDA_OPERATOR(CountDown, CountDownOp<int64_t, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CheckCounterDone,
CheckCounterDoneOp<int64_t, CUDAContext>);
REGISTER_CUDA_OPERATOR(CountUp, CountUpOp<int64_t, CUDAContext>);
REGISTER_CUDA_OPERATOR(RetrieveCount, RetrieveCountOp<int64_t, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/cube_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CubeGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * __ldg(X + i) * __ldg(X + i) * T(3);
#else
dX[i] = dY[i] * X[i] * X[i] * T(3);
#endif
}
}
} // namespace
template <>
template <typename T>
bool CubeGradientFunctor<HIPContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* X_dims */,
const T* dY,
const T* X,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( CubeGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Cube,
UnaryElementwiseOp<NumericTypes, HIPContext, CubeFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
CubeGradient,
BinaryElementwiseOp<
NumericTypes,
HIPContext,
CubeGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/cube_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
CubeGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * __ldg(X + i) * __ldg(X + i) * T(3);
#else
dX[i] = dY[i] * X[i] * X[i] * T(3);
#endif
}
}
} // namespace
template <>
template <typename T>
bool CubeGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* X_dims */,
const T* dY,
const T* X,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
CubeGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Cube,
UnaryElementwiseOp<NumericTypes, CUDAContext, CubeFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
CubeGradient,
BinaryElementwiseOp<
NumericTypes,
CUDAContext,
CubeGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/data_couple.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(DataCouple, DataCoupleOp<HIPContext>);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/data_couple.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(DataCouple, DataCoupleOp<CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/do_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Do, DoOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/do_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Do, DoOp<CUDAContext>);
} // namespace caffe2
### |
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/dropout_op.h"
namespace caffe2 {
namespace {
__global__ void DropoutKernel(
const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) {
const float scale = 1. / (1. - ratio);
HIP_1D_KERNEL_LOOP(i, N) {
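// Ydata was pre-filled with uniform random numbers by the caller (hiprandGenerateUniform), so thresholding it against ratio yields the keep-mask before Y is overwritten with the scaled input.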
maskdata[i] = (Ydata[i] > ratio);
Ydata[i] = Xdata[i] * scale * maskdata[i];
}
}
}
template <>
bool DropoutOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
if (is_test_) {
if (Y != &X) {
context_.CopySameDevice<float>(
X.numel(), X.data<float>(), Y->template mutable_data<float>());
}
return true;
} else {
float* Ydata = Y->template mutable_data<float>();
auto* mask = Output(1, X.sizes(), at::dtype<bool>());
CAFFE_ENFORCE(X.data<float>() != Ydata, "In-place GPU dropout is broken");
HIPRAND_ENFORCE(
hiprandGenerateUniform(context_.hiprand_generator(), Ydata, X.numel()));
hipLaunchKernelGGL(( DropoutKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), X.numel(), ratio_, X.data<float>(), Ydata, mask->template mutable_data<bool>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
}
namespace {
__global__ void DropoutGradientKernel(
const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) {
HIP_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i] * maskdata[i] * scale;
}
}
}
template <>
bool DropoutGradientOp<float, HIPContext>::RunOnDevice() {
auto& dY = Input(0);
auto* dX = Output(0, dY.sizes(), at::dtype<float>());
if (is_test_) {
if (dX != &dY) {
context_.CopySameDevice<float>(
dY.numel(), dY.data<float>(), dX->template mutable_data<float>());
}
return true;
} else {
auto& mask = Input(1);
CAFFE_ENFORCE_EQ(dY.numel(), mask.numel());
const float scale = 1. / (1. - ratio_);
hipLaunchKernelGGL(( DropoutGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), dY.numel(), dY.data<float>(), mask.data<bool>(), scale, dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
}
REGISTER_HIP_OPERATOR(Dropout, DropoutOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(DropoutGrad, DropoutGradientOp<float, HIPContext>);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/dropout_op.h"
namespace caffe2 {
namespace {
__global__ void DropoutKernel(
const int N, const float ratio, const float* Xdata, float* Ydata, bool* maskdata) {
const float scale = 1. / (1. - ratio);
CUDA_1D_KERNEL_LOOP(i, N) {
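// Ydata was pre-filled with uniform random numbers by the caller (curandGenerateUniform), so thresholding it against ratio yields the keep-mask before Y is overwritten with the scaled input.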
maskdata[i] = (Ydata[i] > ratio);
Ydata[i] = Xdata[i] * scale * maskdata[i];
}
}
}
template <>
bool DropoutOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
if (is_test_) {
if (Y != &X) {
context_.CopySameDevice<float>(
X.numel(), X.data<float>(), Y->template mutable_data<float>());
}
return true;
} else {
float* Ydata = Y->template mutable_data<float>();
auto* mask = Output(1, X.sizes(), at::dtype<bool>());
CAFFE_ENFORCE(X.data<float>() != Ydata, "In-place GPU dropout is broken");
CURAND_ENFORCE(
curandGenerateUniform(context_.curand_generator(), Ydata, X.numel()));
DropoutKernel<<<
CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
X.numel(), ratio_, X.data<float>(), Ydata, mask->template mutable_data<bool>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
}
namespace {
__global__ void DropoutGradientKernel(
const int N, const float* dYdata, const bool* maskdata, const float scale, float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
dXdata[i] = dYdata[i] * maskdata[i] * scale;
}
}
}
template <>
bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() {
auto& dY = Input(0);
auto* dX = Output(0, dY.sizes(), at::dtype<float>());
if (is_test_) {
if (dX != &dY) {
context_.CopySameDevice<float>(
dY.numel(), dY.data<float>(), dX->template mutable_data<float>());
}
return true;
} else {
auto& mask = Input(1);
CAFFE_ENFORCE_EQ(dY.numel(), mask.numel());
const float scale = 1. / (1. - ratio_);
DropoutGradientKernel<<<
CAFFE_GET_BLOCKS(dY.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
dY.numel(), dY.data<float>(), mask.data<bool>(), scale, dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
}
REGISTER_CUDA_OPERATOR(Dropout, DropoutOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp<float, CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/elementwise_add_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Add,
BinaryElementwiseOp<NumericTypes, HIPContext, AddFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
AddGradient,
BinaryElementwiseGradientOp<
NumericTypes,
HIPContext,
AddFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/elementwise_add_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Add,
BinaryElementwiseOp<NumericTypes, CUDAContext, AddFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
AddGradient,
BinaryElementwiseGradientOp<
NumericTypes,
CUDAContext,
AddFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/elementwise_op_test.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/flags.h"
C10_DECLARE_string(caffe_test_root);
template <>
void CopyVector<caffe2::HIPContext>(const int N, const bool* x, bool* y) {
HIP_CHECK(hipMemcpy(y, x, N * sizeof(bool), hipMemcpyHostToDevice));
}
template <>
caffe2::OperatorDef CreateOperatorDef<caffe2::HIPContext>() {
caffe2::OperatorDef def;
def.mutable_device_option()->set_device_type(caffe2::PROTO_HIP);
return def;
}
TEST(ElementwiseGPUTest, And) {
if (!caffe2::HasHipGPU())
return;
elementwiseAnd<caffe2::HIPContext>();
}
TEST(ElementwiseGPUTest, Or) {
if (!caffe2::HasHipGPU())
return;
elementwiseOr<caffe2::HIPContext>();
}
TEST(ElementwiseGPUTest, Xor) {
if (!caffe2::HasHipGPU())
return;
elementwiseXor<caffe2::HIPContext>();
}
TEST(ElementwiseGPUTest, Not) {
if (!caffe2::HasHipGPU())
return;
elementwiseNot<caffe2::HIPContext>();
}
### |
#include "caffe2/operators/elementwise_op_test.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/flags.h"
C10_DECLARE_string(caffe_test_root);
template <>
void CopyVector<caffe2::CUDAContext>(const int N, const bool* x, bool* y) {
CUDA_CHECK(cudaMemcpy(y, x, N * sizeof(bool), cudaMemcpyHostToDevice));
}
template <>
caffe2::OperatorDef CreateOperatorDef<caffe2::CUDAContext>() {
caffe2::OperatorDef def;
def.mutable_device_option()->set_device_type(caffe2::PROTO_CUDA);
return def;
}
TEST(ElementwiseGPUTest, And) {
if (!caffe2::HasCudaGPU())
return;
elementwiseAnd<caffe2::CUDAContext>();
}
TEST(ElementwiseGPUTest, Or) {
if (!caffe2::HasCudaGPU())
return;
elementwiseOr<caffe2::CUDAContext>();
}
TEST(ElementwiseGPUTest, Xor) {
if (!caffe2::HasCudaGPU())
return;
elementwiseXor<caffe2::CUDAContext>();
}
TEST(ElementwiseGPUTest, Not) {
if (!caffe2::HasCudaGPU())
return;
elementwiseNot<caffe2::CUDAContext>();
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/elementwise_sub_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Sub,
BinaryElementwiseOp<NumericTypes, HIPContext, SubFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
SubGradient,
BinaryElementwiseGradientOp<
NumericTypes,
HIPContext,
SubFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/elementwise_sub_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Sub,
BinaryElementwiseOp<NumericTypes, CUDAContext, SubFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
SubGradient,
BinaryElementwiseGradientOp<
NumericTypes,
CUDAContext,
SubFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/elu_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void EluHIPKernel(const int N, const T alpha, const T* X, T* Y);
template <>
__global__ void
EluHIPKernel<float>(const int N, const float alpha, const float* X, float* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
Y[i] =
__ldg(X + i) < 0 ? alpha * (expf(__ldg(X + i)) - 1.0f) : __ldg(X + i);
#else
Y[i] = X[i] < 0 ? alpha * (expf(X[i]) - 1.0f) : X[i];
#endif
}
}
template <typename T>
__global__ void EluGradientHIPKernel(
const int N,
const T alpha,
const T* dY,
const T* Y,
T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(Y + i) < 0 ? __ldg(dY + i) * (__ldg(Y + i) + alpha)
: __ldg(dY + i);
#else
dX[i] = Y[i] < 0 ? dY[i] * (Y[i] + alpha) : dY[i];
#endif
}
}
} // namespace
template <>
template <typename T>
bool EluFunctor<HIPContext>::
operator()(const int N, const T* X, T* Y, HIPContext* context) const {
hipLaunchKernelGGL(( EluHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), N, alpha, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool EluGradientFunctor<HIPContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( EluGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, alpha, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Elu,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
HIPContext,
EluFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
EluGradient,
BinaryElementwiseWithArgsOp<
TensorTypes<float>,
HIPContext,
EluGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/elu_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void EluCUDAKernel(const int N, const T alpha, const T* X, T* Y);
template <>
__global__ void
EluCUDAKernel<float>(const int N, const float alpha, const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] =
__ldg(X + i) < 0 ? alpha * (expf(__ldg(X + i)) - 1.0f) : __ldg(X + i);
#else
Y[i] = X[i] < 0 ? alpha * (expf(X[i]) - 1.0f) : X[i];
#endif
}
}
template <typename T>
__global__ void EluGradientCUDAKernel(
const int N,
const T alpha,
const T* dY,
const T* Y,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(Y + i) < 0 ? __ldg(dY + i) * (__ldg(Y + i) + alpha)
: __ldg(dY + i);
#else
dX[i] = Y[i] < 0 ? dY[i] * (Y[i] + alpha) : dY[i];
#endif
}
}
} // namespace
template <>
template <typename T>
bool EluFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
EluCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool EluGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
EluGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, alpha, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Elu,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
EluFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
EluGradient,
BinaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
EluGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void hardtanh_backward_kernel(
TensorIterator& iter,
const Scalar& min,
const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(), "hardtanh_backward_hip", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto min_val = min.to<opmath_t>();
auto max_val = max.to<opmath_t>();
gpu_kernel(
iter,
[min_val, max_val] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
return (bop <= min_val) || (bop >= max_val) ? opmath_t(0) : aop;
});
});
}
} // namespace
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void hardtanh_backward_kernel(
TensorIterator& iter,
const Scalar& min,
const Scalar& max) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
iter.dtype(), "hardtanh_backward_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto min_val = min.to<opmath_t>();
auto max_val = max.to<opmath_t>();
gpu_kernel(
iter,
[min_val, max_val] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
return (bop <= min_val) || (bop >= max_val) ? opmath_t(0) : aop;
});
});
}
} // namespace
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/enforce_finite_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
template <>
template <typename T>
bool EnforceFiniteOp<HIPContext>::DoRunWithType() {
buffer_.CopyFrom(Input(0)); // sync copy
EnforceOnCPU<T>(buffer_);
return true;
}
REGISTER_HIP_OPERATOR(EnforceFinite, EnforceFiniteOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/enforce_finite_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <>
template <typename T>
bool EnforceFiniteOp<CUDAContext>::DoRunWithType() {
buffer_.CopyFrom(Input(0)); // sync copy
EnforceOnCPU<T>(buffer_);
return true;
}
REGISTER_CUDA_OPERATOR(EnforceFinite, EnforceFiniteOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/ensure_cpu_output_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
// From HIP Context, takes either HIP or CPU tensor as input, and produce
// TensorCPU
REGISTER_HIP_OPERATOR(EnsureCPUOutput, EnsureCPUOutputOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/ensure_cpu_output_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
// From CUDA Context, takes either CUDA or CPU tensor as input, and produce
// TensorCPU
REGISTER_CUDA_OPERATOR(EnsureCPUOutput, EnsureCPUOutputOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/erf_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void ErfGradientHIPKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
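// d/dx erf(x) = 2/sqrt(pi) * exp(-x^2); the chain rule multiplies by the incoming gradient dY.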
#if __HIP_ARCH__ >= 350
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(__ldg(X+i), 2.0f)) * __ldg(dY + i);
#else
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(X[i], 2.0f)) * dY[i];
#endif
}
}
} // namespace
template <>
template <typename T>
bool ErfGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( ErfGradientHIPKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Erf,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
ErfFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
ErfGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
ErfGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/erf_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void ErfGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
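// d/dx erf(x) = 2/sqrt(pi) * exp(-x^2); the chain rule multiplies by the incoming gradient dY.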
#if __CUDA_ARCH__ >= 350
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(__ldg(X+i), 2.0f)) * __ldg(dY + i);
#else
dX[i] = 2.0f / sqrtf(PI) * expf(-powf(X[i], 2.0f)) * dY[i];
#endif
}
}
} // namespace
template <>
template <typename T>
bool ErfGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
ErfGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Erf,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
ErfFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
ErfGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
ErfGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/expand_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Expand,
ExpandOp<
TensorTypes<std::int32_t, std::int64_t, float, double>,
HIPContext>);
REGISTER_HIP_OPERATOR(
ExpandGradient,
ExpandGradientOp<
TensorTypes<std::int32_t, std::int64_t, float, double>,
HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/expand_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Expand,
ExpandOp<
TensorTypes<std::int32_t, std::int64_t, float, double>,
CUDAContext>);
REGISTER_CUDA_OPERATOR(
ExpandGradient,
ExpandGradientOp<
TensorTypes<std::int32_t, std::int64_t, float, double>,
CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/expand_squeeze_dims_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Squeeze, SqueezeOp<HIPContext>);
REGISTER_HIP_OPERATOR(ExpandDims, ExpandDimsOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/expand_squeeze_dims_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Squeeze, SqueezeOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(ExpandDims, ExpandDimsOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/exp_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Exp,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
ExpFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/exp_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Exp,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
ExpFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/filler_op.h"
#include "caffe2/operators/hip/operator_fallback_gpu.h"
namespace caffe2 {
namespace {
__global__ void FillRangeKernel(const int n, float* data) {
HIP_1D_KERNEL_LOOP(index, n) {
data[index] = index;
}
}
template <typename T>
__global__ void FillDiagonalKernel(
const int num_diagonal_elements,
const int64_t step_size,
const T value,
T* data) {
HIP_1D_KERNEL_LOOP(index, num_diagonal_elements) {
data[index * step_size] = value;
}
}
}
template <>
bool RangeFillOp<float, HIPContext>::Fill(Tensor* output) {
int N = output->numel();
hipLaunchKernelGGL(( FillRangeKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(), N, output->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool DiagonalFillOp<HIPContext>::FillWithType(Tensor* output) {
VerifyOutputShape(output);
auto* data = output->template mutable_data<T>();
int size = output->numel();
// first fill everything with 0
math::Set<T, HIPContext>(size, T(0), data, &context_);
T value = OperatorBase::GetSingleArgument<T>("value", 0);
int64_t step_size = GetStepSize(output);
int num_diagonal_elements = ceil((float)size / step_size);
hipLaunchKernelGGL(( FillDiagonalKernel),
dim3(CAFFE_GET_BLOCKS(num_diagonal_elements)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(), num_diagonal_elements, step_size, value, data);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(UniformFill, UniformFillOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(UniformIntFill, UniformFillOp<int, HIPContext>);
REGISTER_HIP_OPERATOR(ConstantFill, ConstantFillOp<HIPContext>);
REGISTER_HIP_OPERATOR(DiagonalFill, DiagonalFillOp<HIPContext>);
REGISTER_HIP_OPERATOR(GaussianFill, GaussianFillOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(XavierFill, XavierFillOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(MSRAFill, MSRAFillOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(RangeFill, RangeFillOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(LengthsRangeFill, GPUFallbackOp);
} // namespace caffe2
### |
#include <cmath>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/filler_op.h"
#include "caffe2/operators/operator_fallback_gpu.h"
namespace caffe2 {
namespace {
__global__ void FillRangeKernel(const int n, float* data) {
CUDA_1D_KERNEL_LOOP(index, n) {
data[index] = index;
}
}
template <typename T>
__global__ void FillDiagonalKernel(
const int num_diagonal_elements,
const int64_t step_size,
const T value,
T* data) {
CUDA_1D_KERNEL_LOOP(index, num_diagonal_elements) {
data[index * step_size] = value;
}
}
}
template <>
bool RangeFillOp<float, CUDAContext>::Fill(Tensor* output) {
int N = output->numel();
FillRangeKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, output->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool DiagonalFillOp<CUDAContext>::FillWithType(Tensor* output) {
VerifyOutputShape(output);
auto* data = output->template mutable_data<T>();
int size = output->numel();
// first fill everything with 0
math::Set<T, CUDAContext>(size, T(0), data, &context_);
T value = OperatorBase::GetSingleArgument<T>("value", 0);
int64_t step_size = GetStepSize(output);
int num_diagonal_elements = ceil((float)size / step_size);
FillDiagonalKernel<<<
CAFFE_GET_BLOCKS(num_diagonal_elements),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(num_diagonal_elements, step_size, value, data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp<int, CUDAContext>);
REGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(DiagonalFill, DiagonalFillOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LengthsRangeFill, GPUFallbackOp);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/find_op.h"
#include "caffe2/utils/cub_namespace.cuh"
namespace caffe2 {
template <typename T>
__global__ void FindKernel(
int num_needles,
int idx_size,
const T* idx,
const T* needles,
int* out,
int missing_value) {
int needle_idx = blockIdx.x; // One cuda block per needle
T q = needles[needle_idx];
int res = (-1);
for (int j = threadIdx.x; j < idx_size; j += CAFFE_HIP_NUM_THREADS) {
if (idx[j] == q) {
res = max(res, j);
}
}
typedef hipcub::BlockReduce<int, CAFFE_HIP_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int min_res = BlockReduce(temp_storage).Reduce(res, hipcub::Max());
if (threadIdx.x == 0) {
out[needle_idx] = min_res == (-1) ? missing_value : min_res;
}
}
template <>
template <typename T>
bool FindOp<HIPContext>::DoRunWithType() {
auto& idx = Input(0);
auto& needles = Input(1);
auto* res_indices = Output(0, needles.sizes(), at::dtype<int>());
const T* idx_data = idx.data<T>();
const T* needles_data = needles.data<T>();
int* res_data = res_indices->template mutable_data<int>();
hipLaunchKernelGGL(( FindKernel<
T>), dim3(needles.numel()), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(),
needles.numel(),
idx.numel(),
idx_data,
needles_data,
res_data,
missing_value_);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(Find, FindOp<HIPContext>)
} // namespace caffe2
### |
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/find_op.h"
#include "caffe2/utils/cub_namespace.cuh"
namespace caffe2 {
template <typename T>
__global__ void FindKernel(
int num_needles,
int idx_size,
const T* idx,
const T* needles,
int* out,
int missing_value) {
int needle_idx = blockIdx.x; // One cuda block per needle
T q = needles[needle_idx];
int res = (-1);
for (int j = threadIdx.x; j < idx_size; j += CAFFE_CUDA_NUM_THREADS) {
if (idx[j] == q) {
res = max(res, j);
}
}
typedef cub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
int min_res = BlockReduce(temp_storage).Reduce(res, cub::Max());
if (threadIdx.x == 0) {
out[needle_idx] = min_res == (-1) ? missing_value : min_res;
}
}
template <>
template <typename T>
bool FindOp<CUDAContext>::DoRunWithType() {
auto& idx = Input(0);
auto& needles = Input(1);
auto* res_indices = Output(0, needles.sizes(), at::dtype<int>());
const T* idx_data = idx.data<T>();
const T* needles_data = needles.data<T>();
int* res_data = res_indices->template mutable_data<int>();
FindKernel<
T><<<needles.numel(), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
needles.numel(),
idx.numel(),
idx_data,
needles_data,
res_data,
missing_value_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Find, FindOp<CUDAContext>)
} // namespace caffe2
### |
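Editorial note (not part of the generated sources): both FindKernel variants above assign one thread block per needle and reduce candidate positions with a block-wide Max, so the operator computes, using the kernel's own variable names,

\[ \text{out}[n] = \begin{cases} \max\{\, j : \text{idx}[j] = \text{needles}[n] \,\} & \text{if a match exists} \\ \text{missing\_value} & \text{otherwise.} \end{cases} \]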
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/floor_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T>
__global__ void FloorKernel(const int N, const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = ::floor(X[i]);
}
}
template <>
bool FloorOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( FloorKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(Floor, FloorOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/floor_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T>
__global__ void FloorKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = std::floor(X[i]);
}
}
template <>
bool FloorOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
FloorKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Floor, FloorOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/free_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Free, FreeOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/free_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Free, FreeOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"leaky_relu_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negval = negval_.to<opmath_t>();
gpu_kernel(iter, [negval] GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
return aop > opmath_t(0) ? aop : aop * negval;
});
});
}
void leaky_relu_backward_kernel(
TensorIteratorBase& iter,
const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"leaky_relu_backward_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negval = negval_.to<opmath_t>();
gpu_kernel(
iter, [negval] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
return aop > opmath_t(0) ? bop : bop * negval;
});
});
}
} // namespace
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"leaky_relu_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negval = negval_.to<opmath_t>();
gpu_kernel(iter, [negval] GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
return aop > opmath_t(0) ? aop : aop * negval;
});
});
}
void leaky_relu_backward_kernel(
TensorIteratorBase& iter,
const Scalar& negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"leaky_relu_backward_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto negval = negval_.to<opmath_t>();
gpu_kernel(
iter, [negval] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
return aop > opmath_t(0) ? bop : bop * negval;
});
});
}
} // namespace
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/gather_op.h"
#include "caffe2/operators/hip/gather_op.cuh"
namespace caffe2 {
template <>
bool GatherOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<Tensor>(INDICES, HIP));
}
template <>
template <typename Index>
bool GatherOp<HIPContext>::DoRunWithType() {
// Use shared implementation with BatchGather
return gather_helper::gather_impl_hip<Index>(
this, DATA, INDICES, 0, axis_, wrap_indices_, match_outer_);
}
REGISTER_HIP_OPERATOR(Gather, GatherOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/gather_op.h"
#include "caffe2/operators/gather_op.cuh"
namespace caffe2 {
template <>
bool GatherOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<Tensor>(INDICES, CUDA));
}
template <>
template <typename Index>
bool GatherOp<CUDAContext>::DoRunWithType() {
// Use shared implementation with BatchGather
return gather_helper::gather_impl_cuda<Index>(
this, DATA, INDICES, 0, axis_, wrap_indices_, match_outer_);
}
REGISTER_CUDA_OPERATOR(Gather, GatherOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#ifndef CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
#define CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
#include <vector>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace utils {
// Computes Non-Maximum Suppression on the GPU
// Reject a bounding box if its region has an intersection-over-union (IoU)
// overlap with a higher scoring selected bounding box larger than a
// threshold.
//
// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes
// size: (N,4), format: [x1; y1; x2; y2]
// the boxes are sorted by scores in descending order
// N : number of boxes
// d_keep_sorted_list : row indices of the selected proposals, sorted by score
// h_nkeep : number of selected proposals
// dev_delete_mask, host_delete_mask : Tensors that will be used as temp storage
// by NMS
// Those tensors will be resized to the necessary size
// context : current HIP context
TORCH_API void nms_gpu_upright(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
const bool legacy_plus_one,
int* d_keep_sorted_list,
int* h_nkeep,
TensorHIP& dev_delete_mask,
TensorCPU& host_delete_mask,
HIPContext* context);
struct RotatedBox {
float x_ctr, y_ctr, w, h, a;
};
// Same as nms_gpu_upright, but for rotated boxes with angle info.
// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes
// size: (N,5), format: [x_ctr; y_ctr; width; height; angle]
// the boxes are sorted by scores in descending order
TORCH_API void nms_gpu_rotated(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
int* d_keep_sorted_list,
int* h_nkeep,
TensorHIP& dev_delete_mask,
TensorCPU& host_delete_mask,
HIPContext* context);
TORCH_API void nms_gpu(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
const bool legacy_plus_one,
int* d_keep_sorted_list,
int* h_nkeep,
TensorHIP& dev_delete_mask,
TensorCPU& host_delete_mask,
HIPContext* context,
const int box_dim);
} // namespace utils
} // namespace caffe2
#endif // CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
### |
#ifndef CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
#define CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
#include <vector>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace utils {
// Computes Non-Maximum Suppression on the GPU
// Reject a bounding box if its region has an intersection-over-union (IoU)
// overlap with a higher scoring selected bounding box larger than a
// threshold.
//
// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes
// size: (N,4), format: [x1; y1; x2; y2]
// the boxes are sorted by scores in descending order
// N : number of boxes
// d_keep_sorted_list : row indices of the selected proposals, sorted by score
// h_nkeep : number of selected proposals
// dev_delete_mask, host_delete_mask : Tensors that will be used as temp storage
// by NMS
// Those tensors will be resized to the necessary size
// context : current CUDA context
TORCH_API void nms_gpu_upright(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
const bool legacy_plus_one,
int* d_keep_sorted_list,
int* h_nkeep,
TensorCUDA& dev_delete_mask,
TensorCPU& host_delete_mask,
CUDAContext* context);
struct RotatedBox {
float x_ctr, y_ctr, w, h, a;
};
// Same as nms_gpu_upright, but for rotated boxes with angle info.
// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes
// size: (N,5), format: [x_ctr; y_ctr; width; height; angle]
// the boxes are sorted by scores in descending order
TORCH_API void nms_gpu_rotated(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
int* d_keep_sorted_list,
int* h_nkeep,
TensorCUDA& dev_delete_mask,
TensorCPU& host_delete_mask,
CUDAContext* context);
TORCH_API void nms_gpu(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
const bool legacy_plus_one,
int* d_keep_sorted_list,
int* h_nkeep,
TensorCUDA& dev_delete_mask,
TensorCPU& host_delete_mask,
CUDAContext* context,
const int box_dim);
} // namespace utils
} // namespace caffe2
#endif // CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/given_tensor_byte_string_to_uint8_fill_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
GivenTensorByteStringToUInt8Fill,
GivenTensorByteStringToUInt8FillOp<HIPContext>);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/given_tensor_byte_string_to_uint8_fill_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
GivenTensorByteStringToUInt8Fill,
GivenTensorByteStringToUInt8FillOp<CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/given_tensor_fill_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(GivenTensorFill, GivenTensorFillOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
GivenTensorDoubleFill,
GivenTensorFillOp<double, HIPContext>);
REGISTER_HIP_OPERATOR(
GivenTensorInt16Fill,
GivenTensorFillOp<int16_t, HIPContext>);
REGISTER_HIP_OPERATOR(GivenTensorIntFill, GivenTensorFillOp<int, HIPContext>);
REGISTER_HIP_OPERATOR(
GivenTensorInt64Fill,
GivenTensorFillOp<int64_t, HIPContext>);
REGISTER_HIP_OPERATOR(
GivenTensorBoolFill,
GivenTensorFillOp<bool, HIPContext>);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/given_tensor_fill_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(GivenTensorFill, GivenTensorFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GivenTensorDoubleFill,
GivenTensorFillOp<double, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GivenTensorInt16Fill,
GivenTensorFillOp<int16_t, CUDAContext>);
REGISTER_CUDA_OPERATOR(GivenTensorIntFill, GivenTensorFillOp<int, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GivenTensorInt64Fill,
GivenTensorFillOp<int64_t, CUDAContext>);
REGISTER_CUDA_OPERATOR(
GivenTensorBoolFill,
GivenTensorFillOp<bool, CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/glu_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void glu_kernel(
const int M,
const int split_dim_size,
const int N,
const float* Xdata,
float* Ydata) {
const int xOffset = 2 * split_dim_size * N;
const int yOffset = split_dim_size * N;
HIP_1D_KERNEL_LOOP(index, M * split_dim_size * N) {
const int i = index / split_dim_size / N;
const int j = index / N % split_dim_size;
const int k = index % N;
const float x1 = Xdata[i * xOffset + j * N + k];
const float x2 = Xdata[i * xOffset + (j + split_dim_size) * N + k];
Ydata[i * yOffset + j * N + k] = x1 * (1. / (1. + exp(-x2)));
}
}
} // namespace
template <>
void GluOp<float, HIPContext>::ComputeGlu(
const int M,
const int split_dim_size,
const int N,
const float* x_data,
float* y_data) {
hipLaunchKernelGGL(( glu_kernel),
dim3(CAFFE_GET_BLOCKS(M * N * split_dim_size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(), M, split_dim_size, N, x_data, y_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_HIP_OPERATOR(Glu, GluOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/glu_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void glu_kernel(
const int M,
const int split_dim_size,
const int N,
const float* Xdata,
float* Ydata) {
const int xOffset = 2 * split_dim_size * N;
const int yOffset = split_dim_size * N;
CUDA_1D_KERNEL_LOOP(index, M * split_dim_size * N) {
const int i = index / split_dim_size / N;
const int j = index / N % split_dim_size;
const int k = index % N;
const float x1 = Xdata[i * xOffset + j * N + k];
const float x2 = Xdata[i * xOffset + (j + split_dim_size) * N + k];
Ydata[i * yOffset + j * N + k] = x1 * (1. / (1. + exp(-x2)));
}
}
} // namespace
template <>
void GluOp<float, CUDAContext>::ComputeGlu(
const int M,
const int split_dim_size,
const int N,
const float* x_data,
float* y_data) {
glu_kernel<<<
CAFFE_GET_BLOCKS(M * N * split_dim_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(M, split_dim_size, N, x_data, y_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(Glu, GluOp<float, CUDAContext>);
} // namespace caffe2
### |
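Editorial note (not part of the generated sources): with x1 and x2 as read in the kernel body above, glu_kernel computes the standard gated linear unit element-wise,

\[ Y = x_1 \cdot \sigma(x_2), \qquad \sigma(x) = \frac{1}{1 + e^{-x}}. \]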
#include "hip/hip_runtime.h"
#include "caffe2/operators/half_float_ops.h"
#include "caffe2/core/hip/context_gpu.h"
#ifdef CAFFE_HAS_HIP_FP16
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}
template <>
bool FloatToHalfOp<HIPContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<at::Half>());
hipLaunchKernelGGL(( FloatToHalfKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), X.numel(), X.data<float>(), reinterpret_cast<half*>(Y->template mutable_data<at::Half>()));
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool HalfToFloatOp<HIPContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( HalfToFloatKernel), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), X.numel(), reinterpret_cast<const half*>(X.data<at::Half>()), Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool Float16UniformFillOp<HIPContext>::RunOnDevice() {
auto* output = Output(0, shape_, at::dtype<at::Half>());
at::Half* out = output->template mutable_data<at::Half>();
auto leading_dim_sz = output->size(0);
CAFFE_ENFORCE_GT(leading_dim_sz, 0, "The input shape should have the first dimension greater than 0");
int rowsz = output->numel() / output->size(0);
ReinitializeTensor(
&temp_data_buffer_, {rowsz}, at::dtype<float>().device(HIP));
float* temp_data = temp_data_buffer_.template mutable_data<float>();
for (uint64_t i = 0; i < leading_dim_sz; i++) {
math::RandUniform<float, HIPContext>(
rowsz, min_, max_, temp_data, &context_);
hipLaunchKernelGGL(( FloatToHalfKernel), dim3(CAFFE_GET_BLOCKS(rowsz)), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), rowsz, temp_data, reinterpret_cast<half*>(out + i * rowsz));
C10_HIP_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_HIP_OPERATOR(FloatToHalf, FloatToHalfOp<HIPContext>);
REGISTER_HIP_OPERATOR(HalfToFloat, HalfToFloatOp<HIPContext>);
REGISTER_HIP_OPERATOR(Float16UniformFill, Float16UniformFillOp<HIPContext>);
}
#endif
### |
#include "caffe2/operators/half_float_ops.h"
#include "caffe2/core/context_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}
template <>
bool FloatToHalfOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<at::Half>());
FloatToHalfKernel<<<
CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
X.numel(), X.data<float>(), reinterpret_cast<half*>(Y->template mutable_data<at::Half>()));
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool HalfToFloatOp<CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
HalfToFloatKernel<<<
CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
X.numel(), reinterpret_cast<const half*>(X.data<at::Half>()), Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool Float16UniformFillOp<CUDAContext>::RunOnDevice() {
auto* output = Output(0, shape_, at::dtype<at::Half>());
at::Half* out = output->template mutable_data<at::Half>();
auto leading_dim_sz = output->size(0);
CAFFE_ENFORCE_GT(leading_dim_sz, 0, "The input shape should have the first dimension greater than 0");
int rowsz = output->numel() / output->size(0);
ReinitializeTensor(
&temp_data_buffer_, {rowsz}, at::dtype<float>().device(CUDA));
float* temp_data = temp_data_buffer_.template mutable_data<float>();
for (uint64_t i = 0; i < leading_dim_sz; i++) {
math::RandUniform<float, CUDAContext>(
rowsz, min_, max_, temp_data, &context_);
FloatToHalfKernel<<<
CAFFE_GET_BLOCKS(rowsz), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
rowsz, temp_data, reinterpret_cast<half*>(out + i * rowsz));
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Float16UniformFill, Float16UniformFillOp<CUDAContext>);
}
#endif
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/hard_sigmoid_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void HardSigmoidHIPKernel(
const int N,
const T alpha,
const T beta,
const T* X,
T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
Y[i] = max(T(0), min(T(1), alpha * __ldg(X + i) + beta));
#else
Y[i] = max(T(0), min(T(1), alpha * X[i] + beta));
#endif
}
}
template <typename T>
__global__ void HardSigmoidGradientHIPKernel(
const int N,
const T alpha,
const T* dY,
const T* Y,
T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = (__ldg(Y + i) > T(0) && __ldg(Y + i) < T(1)) ? __ldg(dY + i) * alpha
: T(0);
#else
dX[i] = (Y[i] > T(0) && Y[i] < T(1)) ? dY[i] * alpha : T(0);
#endif
}
}
} // namespace
template <>
template <typename T>
bool HardSigmoidFunctor<HIPContext>::
operator()(const int N, const T* X, T* Y, HIPContext* context) const {
hipLaunchKernelGGL(( HardSigmoidHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), N, alpha, beta, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool HardSigmoidGradientFunctor<HIPContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( HardSigmoidGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, alpha, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
HardSigmoid,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
HIPContext,
HardSigmoidFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
HardSigmoidGradient,
BinaryElementwiseWithArgsOp<
TensorTypes<float>,
HIPContext,
HardSigmoidGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/hard_sigmoid_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void HardSigmoidCUDAKernel(
const int N,
const T alpha,
const T beta,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = max(T(0), min(T(1), alpha * __ldg(X + i) + beta));
#else
Y[i] = max(T(0), min(T(1), alpha * X[i] + beta));
#endif
}
}
template <typename T>
__global__ void HardSigmoidGradientCUDAKernel(
const int N,
const T alpha,
const T* dY,
const T* Y,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = (__ldg(Y + i) > T(0) && __ldg(Y + i) < T(1)) ? __ldg(dY + i) * alpha
: T(0);
#else
dX[i] = (Y[i] > T(0) && Y[i] < T(1)) ? dY[i] * alpha : T(0);
#endif
}
}
} // namespace
template <>
template <typename T>
bool HardSigmoidFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
HardSigmoidCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, beta, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool HardSigmoidGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
HardSigmoidGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, alpha, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
HardSigmoid,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
HardSigmoidFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
HardSigmoidGradient,
BinaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
HardSigmoidGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
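Editorial note (not part of the generated sources): with alpha and beta as in the functors above, the HardSigmoid forward and backward kernels compute element-wise

\[ Y_i = \max\bigl(0,\ \min(1,\ \alpha X_i + \beta)\bigr), \qquad dX_i = \begin{cases} \alpha\, dY_i & 0 < Y_i < 1 \\ 0 & \text{otherwise.} \end{cases} \]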
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/if_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(If, IfOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/if_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(If, IfOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/im2col_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Im2Col, Im2ColOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Col2Im, Col2ImOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/im2col_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Im2Col, Im2ColOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Col2Im, Col2ImOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/leaky_relu_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LeakyReluKernel(const int N, const T alpha, const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] >= 0 ? X[i] : X[i] * alpha;
}
}
template <typename T>
__global__ void LeakyReluGradientKernel(
const int N,
const T alpha,
const T* Y,
const T* dY,
T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] >= 0 ? dY[i] : dY[i] * alpha;
}
}
} // namespace
template <>
bool LeakyReluOp<float, HIPContext>::RunOnDevice() {
const auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( LeakyReluKernel),
dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
X.numel(), alpha_, X.data<float>(), Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool LeakyReluGradientOp<float, HIPContext>::RunOnDevice() {
const auto& Y = Input(0);
const auto& dY = Input(1);
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
CAFFE_ENFORCE_EQ(Y.numel(), dY.numel());
hipLaunchKernelGGL(( LeakyReluGradientKernel),
dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
Y.numel(),
alpha_,
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(LeakyRelu, LeakyReluOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
LeakyReluGradient,
LeakyReluGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/leaky_relu_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LeakyReluKernel(const int N, const T alpha, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = X[i] >= 0 ? X[i] : X[i] * alpha;
}
}
template <typename T>
__global__ void LeakyReluGradientKernel(
const int N,
const T alpha,
const T* Y,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = Y[i] >= 0 ? dY[i] : dY[i] * alpha;
}
}
} // namespace
template <>
bool LeakyReluOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
LeakyReluKernel<<<
CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), alpha_, X.data<float>(), Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool LeakyReluGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& Y = Input(0);
const auto& dY = Input(1);
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
CAFFE_ENFORCE_EQ(Y.numel(), dY.numel());
LeakyReluGradientKernel<<<
CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
alpha_,
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(LeakyRelu, LeakyReluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
LeakyReluGradient,
LeakyReluGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
// -----------------------------------
// log_sigmoid forward
// -----------------------------------
void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_forward_hip", [&] {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t in_) -> scalar_t {
const opmath_t in = in_;
const auto min = ::min(opmath_t(0), in);
const auto z = ::exp(-std::abs(in));
return min - std::log1p(z);
});
});
}
namespace {
// -----------------------------------
// log_sigmoid backward
// -----------------------------------
void log_sigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_backward_hip", [&] {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t in_, scalar_t grad_out_) -> scalar_t {
const opmath_t in = in_;
const opmath_t grad_out = grad_out_;
auto in_negative = in < opmath_t(0);
auto max_deriv = in_negative ? opmath_t(1) : opmath_t(0);
auto sign = in_negative ? opmath_t(1) : -opmath_t(1);
const auto z = ::exp(-std::abs(in));
return grad_out * (max_deriv - sign * (z / (opmath_t(1) + z)));
});
});
}
} // namespace
REGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
// -----------------------------------
// log_sigmoid forward
// -----------------------------------
void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_forward_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t in_) -> scalar_t {
const opmath_t in = in_;
const auto min = std::min(opmath_t(0), in);
const auto z = std::exp(-std::abs(in));
return min - std::log1p(z);
});
});
}
namespace {
// -----------------------------------
// log_sigmoid backward
// -----------------------------------
void log_sigmoid_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, iter.common_dtype(), "log_sigmoid_backward_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t in_, scalar_t grad_out_) -> scalar_t {
const opmath_t in = in_;
const opmath_t grad_out = grad_out_;
auto in_negative = in < opmath_t(0);
auto max_deriv = in_negative ? opmath_t(1) : opmath_t(0);
auto sign = in_negative ? opmath_t(1) : -opmath_t(1);
const auto z = std::exp(-std::abs(in));
return grad_out * (max_deriv - sign * (z / (opmath_t(1) + z)));
});
});
}
} // namespace
REGISTER_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_kernel);
} // namespace at::native
### |
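Editorial note (not part of the generated sources): the log_sigmoid forward lambda above uses the numerically stable identity, and the backward lambda its derivative,

\[ \log \sigma(x) = \min(0, x) - \log\bigl(1 + e^{-|x|}\bigr), \qquad \frac{d}{dx} \log \sigma(x) = 1 - \sigma(x). \]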
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/lengths_pad_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(LengthsPad, LengthsPadOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/lengths_pad_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(LengthsPad, LengthsPadOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/load_save_op.h"
namespace caffe2 {
template <>
void LoadOp<HIPContext>::SetCurrentDevice(BlobProto* proto) {
if (proto->has_tensor()) {
proto->mutable_tensor()->clear_device_detail();
auto* device_detail = proto->mutable_tensor()->mutable_device_detail();
device_detail->set_device_type(PROTO_HIP);
device_detail->set_device_id(CaffeHipGetDevice());
}
}
REGISTER_HIP_OPERATOR(Load, LoadOp<HIPContext>);
REGISTER_HIP_OPERATOR(Save, SaveOp<HIPContext>);
REGISTER_HIP_OPERATOR(Checkpoint, CheckpointOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/load_save_op.h"
namespace caffe2 {
template <>
void LoadOp<CUDAContext>::SetCurrentDevice(BlobProto* proto) {
if (proto->has_tensor()) {
proto->mutable_tensor()->clear_device_detail();
auto* device_detail = proto->mutable_tensor()->mutable_device_detail();
device_detail->set_device_type(PROTO_CUDA);
device_detail->set_device_id(CaffeCudaGetDevice());
}
}
REGISTER_CUDA_OPERATOR(Load, LoadOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Save, SaveOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(Checkpoint, CheckpointOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/locally_connected_op.h"
#include "caffe2/operators/locally_connected_op_impl.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(LC, LocallyConnectedOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
LCGradient,
LocallyConnectedGradientOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(LC1D, LocallyConnectedOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
LC1DGradient,
LocallyConnectedGradientOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(LC2D, LocallyConnectedOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
LC2DGradient,
LocallyConnectedGradientOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(LC3D, LocallyConnectedOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
LC3DGradient,
LocallyConnectedGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/locally_connected_op.h"
#include "caffe2/operators/locally_connected_op_impl.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(LC, LocallyConnectedOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
LCGradient,
LocallyConnectedGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LC1D, LocallyConnectedOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
LC1DGradient,
LocallyConnectedGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LC2D, LocallyConnectedOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
LC2DGradient,
LocallyConnectedGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(LC3D, LocallyConnectedOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
LC3DGradient,
LocallyConnectedGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/log1p_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
Log1pGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (__ldg(X + i) + T(1));
#else
dX[i] = dY[i] / (X[i] + T(1));
#endif
}
}
} // namespace
template <>
template <typename T>
bool Log1pGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( Log1pGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Log1p,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
Log1pFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
Log1pGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
Log1pGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/log1p_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
Log1pGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) / (__ldg(X + i) + T(1));
#else
dX[i] = dY[i] / (X[i] + T(1));
#endif
}
}
} // namespace
template <>
template <typename T>
bool Log1pGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
Log1pGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Log1p,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
Log1pFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
Log1pGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
Log1pGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/logit_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = fminf(X[i], (T(1) - eps));
Y[i] = fmaxf(Y[i], eps);
Y[i] = logf(Y[i] / (T(1) - Y[i]));
}
}
template <typename T>
__global__ void LogitGradientKernel(
const int N,
const T* X,
const T* dY,
const float eps,
T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] < eps || X[i] > T(1) - eps) ? T(0)
: (dY[i] / X[i] / (T(1) - X[i]));
}
}
} // namespace
template <>
template <typename T>
bool LogitFunctor<HIPContext>::
operator()(const int N, const T* X, T* Y, HIPContext* context) const {
hipLaunchKernelGGL(( LogitKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), N, X, eps_, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool LogitGradientOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
int n = X.size();
hipLaunchKernelGGL(( LogitGradientKernel),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
n,
X.data<float>(),
dY.data<float>(),
eps_,
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Logit,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
HIPContext,
LogitFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(LogitGradient, LogitGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/logit_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = fminf(X[i], (T(1) - eps));
Y[i] = fmaxf(Y[i], eps);
Y[i] = logf(Y[i] / (T(1) - Y[i]));
}
}
template <typename T>
__global__ void LogitGradientKernel(
const int N,
const T* X,
const T* dY,
const float eps,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] < eps || X[i] > T(1) - eps) ? T(0)
: (dY[i] / X[i] / (T(1) - X[i]));
}
}
} // namespace
template <>
template <typename T>
bool LogitFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
LogitKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, X, eps_, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool LogitGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
int n = X.size();
LogitGradientKernel<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
n,
X.data<float>(),
dY.data<float>(),
eps_,
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Logit,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
LogitFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(LogitGradient, LogitGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
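Editorial note (not part of the generated sources): with eps as in the kernels above, Logit clamps before taking the log-odds and its gradient vanishes outside the clamped range,

\[ Y_i = \log \frac{\tilde X_i}{1 - \tilde X_i}, \quad \tilde X_i = \operatorname{clamp}(X_i,\ \epsilon,\ 1 - \epsilon), \qquad dX_i = \begin{cases} \dfrac{dY_i}{X_i (1 - X_i)} & \epsilon \le X_i \le 1 - \epsilon \\ 0 & \text{otherwise.} \end{cases} \]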
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/log_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Log,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
LogFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/log_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Log,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
LogFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/loss_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(AveragedLoss, AveragedLoss<float, HIPContext>);
REGISTER_HIP_OPERATOR(
AveragedLossGradient,
AveragedLossGradient<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/loss_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(AveragedLoss, AveragedLoss<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
AveragedLossGradient,
AveragedLossGradient<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/lpnorm_op.h"
#include "caffe2/operators/hip/operator_fallback_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(LpNorm, GPUFallbackOp);
REGISTER_HIP_OPERATOR(LpNormGradient, GPUFallbackOp);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/lpnorm_op.h"
#include "caffe2/operators/operator_fallback_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(LpNorm, GPUFallbackOp);
REGISTER_CUDA_OPERATOR(LpNormGradient, GPUFallbackOp);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/matmul_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(MatMul, MatMulOp<float, HIPContext>);
}
### |
#include "caffe2/operators/matmul_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(MatMul, MatMulOp<float, CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <cfloat>
#include "caffe2/core/context.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/operators/pool_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
class MaxPoolWithIndexOp final : public ConvPoolOpBase<HIPContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(HIPContext);
MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<HIPContext>(operator_def, ws) {}
~MaxPoolWithIndexOp() {}
template <typename T>
bool DoRunWithType();
bool RunOnDevice() override;
// Input: X
// Output: Y, mask
};
class MaxPoolWithIndexGradientOp final : public ConvPoolOpBase<HIPContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(HIPContext);
MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<HIPContext>(operator_def, ws) {}
~MaxPoolWithIndexGradientOp() {}
template <typename T>
bool DoRunWithType();
bool RunOnDevice() override;
// Input: X, dY, mask
// Output: dX
};
}; // namespace caffe2
### |
#pragma once
#include <cfloat>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/operators/pool_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
class MaxPoolWithIndexOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws) {}
~MaxPoolWithIndexOp() {}
template <typename T>
bool DoRunWithType();
bool RunOnDevice() override;
// Input: X
// Output: Y, mask
};
class MaxPoolWithIndexGradientOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws) {}
~MaxPoolWithIndexGradientOp() {}
template <typename T>
bool DoRunWithType();
bool RunOnDevice() override;
// Input: X, dY, mask
// Output: dX
};
}; // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void mish_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t x_acc = static_cast<opmath_t>(x);
return x_acc *
c10::hip::compat::tanh(
c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
});
});
}
void mish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_backward_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t dy_acc = static_cast<opmath_t>(dy);
const opmath_t x_acc = static_cast<opmath_t>(x);
const opmath_t s_acc =
opmath_t(1) / (opmath_t(1) + c10::hip::compat::exp(-x_acc));
const opmath_t t_acc = c10::hip::compat::tanh(
c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
return dy_acc *
(t_acc + x_acc * s_acc * (opmath_t(1) - t_acc * t_acc));
});
});
}
} // namespace
REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void mish_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t x_acc = static_cast<opmath_t>(x);
return x_acc *
c10::cuda::compat::tanh(
c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
});
});
}
void mish_backward_kernel(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"mish_backward_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t dy_acc = static_cast<opmath_t>(dy);
const opmath_t x_acc = static_cast<opmath_t>(x);
const opmath_t s_acc =
opmath_t(1) / (opmath_t(1) + c10::cuda::compat::exp(-x_acc));
const opmath_t t_acc = c10::cuda::compat::tanh(
c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
return dy_acc *
(t_acc + x_acc * s_acc * (opmath_t(1) - t_acc * t_acc));
});
});
}
} // namespace
REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
} // namespace at::native
### |
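Editorial note (not part of the generated sources): the Mish forward and backward lambdas above implement, element-wise,

\[ \operatorname{mish}(x) = x \tanh\bigl(\operatorname{softplus}(x)\bigr), \qquad \frac{d}{dx}\operatorname{mish}(x) = \tanh(s) + x\,\sigma(x)\bigl(1 - \tanh^2(s)\bigr), \quad s = \operatorname{softplus}(x) = \log(1 + e^{x}). \]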
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/mean_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Mean, MeanOp<HIPContext>);
REGISTER_HIP_OPERATOR(MeanGradient, MeanGradientOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/mean_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Mean, MeanOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(MeanGradient, MeanGradientOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
namespace {
class GetGPUMemoryUsageOp final : public Operator<HIPContext> {
public:
template<class... Args> explicit GetGPUMemoryUsageOp(Args&&... args)
: Operator<HIPContext>(std::forward<Args>(args)...) {}
~GetGPUMemoryUsageOp() override {}
bool RunOnDevice() override {
TORCH_CHECK_EQ(InputSize(), 0);
TORCH_CHECK_EQ(OutputSize(), 1);
std::vector<long> total_by_gpu = HIPContext::TotalMemoryByGpu();
std::vector<long> max_by_gpu = HIPContext::MaxMemoryByGpu();
TORCH_CHECK_EQ(total_by_gpu.size(), max_by_gpu.size());
auto* stats = Output(0, {2, static_cast<int64_t>(total_by_gpu.size())}, at::dtype<long>());
context_.CopyFromCPU<long>(
total_by_gpu.size(),
total_by_gpu.data(),
stats->template mutable_data<long>());
context_.CopyFromCPU<long>(
max_by_gpu.size(),
max_by_gpu.data(),
stats->template mutable_data<long>() + total_by_gpu.size());
return true;
}
};
OPERATOR_SCHEMA(GetGPUMemoryUsage)
.NumInputs(0)
.NumOutputs(1)
.SetDoc(R"DOC(Fetches GPU memory stats from HIPContext. Result is stored
in output blob with shape (2, num_gpus). First row contains the total
current memory usage, and the second row the maximum usage during
this execution.
NOTE: --caffe2_gpu_memory_tracking flag must be enabled to use this op.
)DOC");
REGISTER_HIP_OPERATOR(GetGPUMemoryUsage, GetGPUMemoryUsageOp);
}
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
namespace {
class GetGPUMemoryUsageOp final : public Operator<CUDAContext> {
public:
template<class... Args> explicit GetGPUMemoryUsageOp(Args&&... args)
: Operator<CUDAContext>(std::forward<Args>(args)...) {}
~GetGPUMemoryUsageOp() override {}
bool RunOnDevice() override {
TORCH_CHECK_EQ(InputSize(), 0);
TORCH_CHECK_EQ(OutputSize(), 1);
std::vector<long> total_by_gpu = CUDAContext::TotalMemoryByGpu();
std::vector<long> max_by_gpu = CUDAContext::MaxMemoryByGpu();
TORCH_CHECK_EQ(total_by_gpu.size(), max_by_gpu.size());
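    // Output layout is (2, num_gpus): the first copy below fills row 0 with the
    // current total usage per GPU, the second fills row 1 with the peak usage.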
auto* stats = Output(0, {2, static_cast<int64_t>(total_by_gpu.size())}, at::dtype<long>());
context_.CopyFromCPU<long>(
total_by_gpu.size(),
total_by_gpu.data(),
stats->template mutable_data<long>());
context_.CopyFromCPU<long>(
max_by_gpu.size(),
max_by_gpu.data(),
stats->template mutable_data<long>() + total_by_gpu.size());
return true;
}
};
OPERATOR_SCHEMA(GetGPUMemoryUsage)
.NumInputs(0)
.NumOutputs(1)
.SetDoc(R"DOC(Fetches GPU memory stats from CUDAContext. Result is stored
in output blob with shape (2, num_gpus). First row contains the total
current memory usage, and the second row the maximum usage during
this execution.
NOTE: --caffe2_gpu_memory_tracking flag must be enabled to use this op.
)DOC");
REGISTER_CUDA_OPERATOR(GetGPUMemoryUsage, GetGPUMemoryUsageOp);
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/minmax_ops.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
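// Routes the upstream gradient to the inputs that attained the elementwise
// extremum: dX[i] = dY[i] where X[i] == Y[i] (Y being the forward Min/Max output),
// and 0 elsewhere.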
template <typename T>
__global__ void SelectGradientHIPKernel(
const int N,
const T* dY,
const T* X,
const T* Y,
T* dX) {
const int i = blockIdx.x * CAFFE_HIP_NUM_THREADS + threadIdx.x;
if (i < N) {
#if __HIP_ARCH__ >= 350 || defined(USE_ROCM)
dX[i] = __ldg(X + i) == __ldg(Y + i) ? __ldg(dY + i) : T(0);
#else
dX[i] = X[i] == Y[i] ? dY[i] : T(0);
#endif
}
}
} // namespace
template <>
bool SelectGradientOpBase<float, HIPContext>::RunOnDevice() {
const auto& Y = Input(0);
const auto& dY = Input(1);
const int N = Y.numel();
const int M = math::DivUp(N, CAFFE_HIP_NUM_THREADS);
const float* dY_data = dY.data<float>();
const float* Y_data = Y.data<float>();
for (int i = 0; i < OutputSize(); i++) {
const auto& Xi = Input(i + 2);
auto* dXi = Output(i, Xi.sizes(), at::dtype<float>());
const float* Xi_data = Xi.data<float>();
float* dXi_data = dXi->mutable_data<float>();
if (N > 0) {
hipLaunchKernelGGL(( SelectGradientHIPKernel<float>)
, dim3(M), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(),
N, dY_data, Xi_data, Y_data, dXi_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
return true;
}
REGISTER_HIP_OPERATOR(Min, MinOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(MinGradient, MinGradientOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(Max, MaxOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(MaxGradient, MaxGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/minmax_ops.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
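// Routes the upstream gradient to the inputs that attained the elementwise
// extremum: dX[i] = dY[i] where X[i] == Y[i] (Y being the forward Min/Max output),
// and 0 elsewhere.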
template <typename T>
__global__ void SelectGradientCUDAKernel(
const int N,
const T* dY,
const T* X,
const T* Y,
T* dX) {
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (i < N) {
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
dX[i] = __ldg(X + i) == __ldg(Y + i) ? __ldg(dY + i) : T(0);
#else
dX[i] = X[i] == Y[i] ? dY[i] : T(0);
#endif
}
}
} // namespace
template <>
bool SelectGradientOpBase<float, CUDAContext>::RunOnDevice() {
const auto& Y = Input(0);
const auto& dY = Input(1);
const int N = Y.numel();
const int M = math::DivUp(N, CAFFE_CUDA_NUM_THREADS);
const float* dY_data = dY.data<float>();
const float* Y_data = Y.data<float>();
for (int i = 0; i < OutputSize(); i++) {
const auto& Xi = Input(i + 2);
auto* dXi = Output(i, Xi.sizes(), at::dtype<float>());
const float* Xi_data = Xi.data<float>();
float* dXi_data = dXi->mutable_data<float>();
if (N > 0) {
SelectGradientCUDAKernel<float>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, dY_data, Xi_data, Y_data, dXi_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
return true;
}
REGISTER_CUDA_OPERATOR(Min, MinOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MinGradient, MinGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Max, MaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MaxGradient, MaxGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/mod_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ModOpSimpleKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
HIP_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
}
}
template <typename T>
__global__ void ModOpKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
HIP_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
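    // The built-in % truncates toward zero; when the remainder is nonzero and its
    // sign differs from the divisor's, shift by the divisor so the result follows
    // the divisor's sign (Python-style modulo).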
if (output_ptr[i] && ((output_ptr[i] > 0) != (divisor_ > 0))) {
output_ptr[i] += divisor_;
}
}
}
} // namespace
template <>
template <typename T>
bool ModOp<HIPContext>::DoRunWithType() {
auto& data = Input(DATA);
auto N = data.numel();
const auto* data_ptr = data.template data<T>();
auto* output = Output(0, data.sizes(), at::dtype<T>());
auto* output_ptr = output->template mutable_data<T>();
if (sign_follow_divisor_) {
hipLaunchKernelGGL(( ModOpKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
N, divisor_, data_ptr, output_ptr);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( ModOpSimpleKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
N, divisor_, data_ptr, output_ptr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_HIP_OPERATOR(Mod, ModOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/mod_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void ModOpSimpleKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
CUDA_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
}
}
template <typename T>
__global__ void ModOpKernel(const int N, const int64_t divisor_,
const T* data_ptr, T* output_ptr) {
CUDA_1D_KERNEL_LOOP(i, N) {
output_ptr[i] = data_ptr[i] % divisor_;
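    // The built-in % truncates toward zero; when the remainder is nonzero and its
    // sign differs from the divisor's, shift by the divisor so the result follows
    // the divisor's sign (Python-style modulo).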
if (output_ptr[i] && ((output_ptr[i] > 0) != (divisor_ > 0))) {
output_ptr[i] += divisor_;
}
}
}
} // namespace
template <>
template <typename T>
bool ModOp<CUDAContext>::DoRunWithType() {
auto& data = Input(DATA);
auto N = data.numel();
const auto* data_ptr = data.template data<T>();
auto* output = Output(0, data.sizes(), at::dtype<T>());
auto* output_ptr = output->template mutable_data<T>();
if (sign_follow_divisor_) {
ModOpKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, divisor_, data_ptr, output_ptr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
ModOpSimpleKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, divisor_, data_ptr, output_ptr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
return true;
}
REGISTER_CUDA_OPERATOR(Mod, ModOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/multi_class_accuracy_op.h"
#include "caffe2/utils/hip/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
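// One thread per example: take the argmax over the D class scores, atomically
// count a hit for the label's class when the prediction matches, and count the
// example toward that class's total. A second kernel divides hits by totals.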
__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracies, int* amounts) {
HIP_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
int labelid = labeldata[i];
if (maxid == labelid) {
gpu_atomic_add(accuracies + labelid, static_cast<float>(1));
}
gpu_atomic_add(amounts + labelid, static_cast<int>(1));
}
}
__global__ void MultiClassAccuracyDivideKernel(
const int D, float* accuracies, const int* amounts) {
HIP_1D_KERNEL_LOOP(i, D) {
if (amounts[i]) {
accuracies[i] /= amounts[i];
}
}
}
} // namespace
template <>
bool MultiClassAccuracyOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
TORCH_DCHECK_EQ(X.dim(), 2);
// amount, number of instances
int N = X.dim32(0);
// dimension, number of classes
int D = X.dim32(1);
TORCH_DCHECK_EQ(label.dim(), 1);
TORCH_DCHECK_EQ(label.dim32(0), N);
auto* Y0 = Output(0, {D}, at::dtype<float>());
auto* Y1 = Output(1, {D}, at::dtype<int>());
const float* Xdata = X.data<float>();
const int* labeldata = label.data<int>();
float* accuracies = Y0->template mutable_data<float>();
int* amounts = Y1->template mutable_data<int>();
math::Set<float, HIPContext>(D, 0.0, accuracies, &context_);
math::Set<int, HIPContext>(D, 0, amounts, &context_);
hipLaunchKernelGGL(( MultiClassAccuracyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS),
0, context_.hip_stream(),
N, D, Xdata, labeldata, accuracies, amounts);
C10_HIP_KERNEL_LAUNCH_CHECK();
hipLaunchKernelGGL(( MultiClassAccuracyDivideKernel), dim3(CAFFE_GET_BLOCKS(D)), dim3(CAFFE_HIP_NUM_THREADS),
0, context_.hip_stream(),
D, accuracies, amounts);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
MultiClassAccuracy, MultiClassAccuracyOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/multi_class_accuracy_op.h"
#include "caffe2/utils/GpuAtomics.cuh"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
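// One thread per example: take the argmax over the D class scores, atomically
// count a hit for the label's class when the prediction matches, and count the
// example toward that class's total. A second kernel divides hits by totals.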
__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracies, int* amounts) {
CUDA_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
int labelid = labeldata[i];
if (maxid == labelid) {
gpu_atomic_add(accuracies + labelid, static_cast<float>(1));
}
gpu_atomic_add(amounts + labelid, static_cast<int>(1));
}
}
__global__ void MultiClassAccuracyDivideKernel(
const int D, float* accuracies, const int* amounts) {
CUDA_1D_KERNEL_LOOP(i, D) {
if (amounts[i]) {
accuracies[i] /= amounts[i];
}
}
}
} // namespace
template <>
bool MultiClassAccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
TORCH_DCHECK_EQ(X.dim(), 2);
// amount, number of instances
int N = X.dim32(0);
// dimension, number of classes
int D = X.dim32(1);
TORCH_DCHECK_EQ(label.dim(), 1);
TORCH_DCHECK_EQ(label.dim32(0), N);
auto* Y0 = Output(0, {D}, at::dtype<float>());
auto* Y1 = Output(1, {D}, at::dtype<int>());
const float* Xdata = X.data<float>();
const int* labeldata = label.data<int>();
float* accuracies = Y0->template mutable_data<float>();
int* amounts = Y1->template mutable_data<int>();
math::Set<float, CUDAContext>(D, 0.0, accuracies, &context_);
math::Set<int, CUDAContext>(D, 0, amounts, &context_);
MultiClassAccuracyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, Xdata, labeldata, accuracies, amounts);
C10_CUDA_KERNEL_LAUNCH_CHECK();
MultiClassAccuracyDivideKernel<<<CAFFE_GET_BLOCKS(D), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
D, accuracies, amounts);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
MultiClassAccuracy, MultiClassAccuracyOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/negate_gradient_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(NegateGradient, NegateGradientOp<HIPContext>)
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/negate_gradient_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(NegateGradient, NegateGradientOp<CUDAContext>)
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/negative_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Negative,
UnaryElementwiseOp<
NumericTypes,
HIPContext,
NegativeFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/negative_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Negative,
UnaryElementwiseOp<
NumericTypes,
CUDAContext,
NegativeFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/one_hot_ops.h"
#include "caffe2/utils/cub_namespace.cuh"
namespace caffe2 {
__global__ void OneHotOpKernel(
const int64_t batch_size,
const int64_t index_size,
const int64_t* indices,
float* output) {
HIP_1D_KERNEL_LOOP(i, batch_size) {
output[i * index_size + indices[i]] = 1.;
}
}
template <>
void OneHotOp<HIPContext>::DoOneHotOp(
int64_t batch_size,
int64_t index_size,
const Tensor& indices,
Tensor* output) {
float* output_ptr = output->template mutable_data<float>();
math::Set<float, HIPContext>(output->numel(), 0., output_ptr, &context_);
hipLaunchKernelGGL(( OneHotOpKernel),
dim3(CAFFE_GET_BLOCKS(batch_size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
batch_size, index_size, indices.data<int64_t>(), output_ptr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_HIP_OPERATOR(OneHot, OneHotOp<HIPContext>);
} // namespace caffe2
### |
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/one_hot_ops.h"
#include "caffe2/utils/cub_namespace.cuh"
namespace caffe2 {
__global__ void OneHotOpKernel(
const int64_t batch_size,
const int64_t index_size,
const int64_t* indices,
float* output) {
CUDA_1D_KERNEL_LOOP(i, batch_size) {
output[i * index_size + indices[i]] = 1.;
}
}
template <>
void OneHotOp<CUDAContext>::DoOneHotOp(
int64_t batch_size,
int64_t index_size,
const Tensor& indices,
Tensor* output) {
float* output_ptr = output->template mutable_data<float>();
math::Set<float, CUDAContext>(output->numel(), 0., output_ptr, &context_);
OneHotOpKernel<<<
CAFFE_GET_BLOCKS(batch_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
batch_size, index_size, indices.data<int64_t>(), output_ptr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(OneHot, OneHotOp<CUDAContext>);
} // namespace caffe2
### |
#ifndef CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_
#define CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/operator.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
template <typename SkipOutputCopy>
class GPUFallbackOpEx final : public Operator<HIPContext> {
public:
USE_OPERATOR_FUNCTIONS(HIPContext);
explicit GPUFallbackOpEx(const OperatorDef& def, Workspace* ws)
: Operator<HIPContext>(def, ws) {
CAFFE_ENFORCE_EQ(def.device_option().device_type(), PROTO_HIP);
OperatorDef base_def_(def);
base_def_.clear_device_option();
base_def_.mutable_device_option()->set_device_type(PROTO_CPU);
for (const string& name : def.input()) {
local_input_blobs_.push_back(local_ws_.CreateBlob(name));
TORCH_CHECK_NOTNULL(local_input_blobs_.back());
}
base_op_ = CreateOperator(base_def_, &local_ws_);
for (const string& name : def.output()) {
local_output_blobs_.push_back(local_ws_.GetBlob(name));
TORCH_CHECK_NOTNULL(local_output_blobs_.back());
}
}
bool RunOnDevice() override {
for (const auto i : c10::irange(InputSize())) {
if (this->InputIsTensorType(i, HIP)) {
BlobGetMutableTensor(local_input_blobs_[i], CPU)->CopyFrom(Input(i));
} else {
VLOG(1) << "Input " << i << " is not TensorHIP. Skipping copy.";
local_input_blobs_[i]->ShareExternal(
const_cast<void*>(OperatorBase::Inputs()[i]->GetRaw()), OperatorBase::Inputs()[i]->meta());
}
}
if (!base_op_->Run()) {
LOG(ERROR) << "Base op run failed in GPUFallbackOp. Def: "
<< ProtoDebugString(this->debug_def());
return false;
}
for (const auto i : c10::irange(OutputSize())) {
if (SkipOutputCopy::Contains(i)) {
VLOG(1) << "Copy output: index " << i << " skipped.";
continue;
}
CAFFE_ENFORCE(
BlobIsTensorType(*local_output_blobs_[i], CPU), "GPU fallback op currently does not support non-TensorCPU "
"output type who needs copying.");
Output(i)->CopyFrom(local_output_blobs_[i]->template Get<TensorCPU>());
}
return true;
}
protected:
Workspace local_ws_;
vector<Blob*> local_input_blobs_;
vector<Blob*> local_output_blobs_;
unique_ptr<OperatorBase> base_op_;
};
using GPUFallbackOp = GPUFallbackOpEx<SkipIndices<>>;
}
#endif
### |
#ifndef CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_
#define CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
template <typename SkipOutputCopy>
class GPUFallbackOpEx final : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
explicit GPUFallbackOpEx(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {
CAFFE_ENFORCE_EQ(def.device_option().device_type(), PROTO_CUDA);
OperatorDef base_def_(def);
base_def_.clear_device_option();
base_def_.mutable_device_option()->set_device_type(PROTO_CPU);
for (const string& name : def.input()) {
local_input_blobs_.push_back(local_ws_.CreateBlob(name));
TORCH_CHECK_NOTNULL(local_input_blobs_.back());
}
base_op_ = CreateOperator(base_def_, &local_ws_);
for (const string& name : def.output()) {
local_output_blobs_.push_back(local_ws_.GetBlob(name));
TORCH_CHECK_NOTNULL(local_output_blobs_.back());
}
}
bool RunOnDevice() override {
for (const auto i : c10::irange(InputSize())) {
if (this->InputIsTensorType(i, CUDA)) {
BlobGetMutableTensor(local_input_blobs_[i], CPU)->CopyFrom(Input(i));
} else {
VLOG(1) << "Input " << i << " is not TensorCUDA. Skipping copy.";
local_input_blobs_[i]->ShareExternal(
const_cast<void*>(OperatorBase::Inputs()[i]->GetRaw()), OperatorBase::Inputs()[i]->meta());
}
}
if (!base_op_->Run()) {
LOG(ERROR) << "Base op run failed in GPUFallbackOp. Def: "
<< ProtoDebugString(this->debug_def());
return false;
}
for (const auto i : c10::irange(OutputSize())) {
if (SkipOutputCopy::Contains(i)) {
VLOG(1) << "Copy output: index " << i << " skipped.";
continue;
}
CAFFE_ENFORCE(
BlobIsTensorType(*local_output_blobs_[i], CPU), "GPU fallback op currently does not support non-TensorCPU "
"output type who needs copying.");
Output(i)->CopyFrom(local_output_blobs_[i]->template Get<TensorCPU>());
}
return true;
}
protected:
Workspace local_ws_;
vector<Blob*> local_input_blobs_;
vector<Blob*> local_output_blobs_;
unique_ptr<OperatorBase> base_op_;
};
using GPUFallbackOp = GPUFallbackOpEx<SkipIndices<>>;
}
#endif
### |
#include <iostream>
#include "caffe2/core/operator.h"
#include "caffe2/operators/hip/operator_fallback_gpu.h"
#include <gtest/gtest.h>
namespace caffe2 {
class IncrementByOneOp final : public Operator<CPUContext> {
public:
template <class... Args>
explicit IncrementByOneOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
const auto& in = Input(0);
auto* out = Output(0, in.sizes(), at::dtype<float>());
const float* in_data = in.template data<float>();
float* out_data = out->template mutable_data<float>();
for (int i = 0; i < in.numel(); ++i) {
out_data[i] = in_data[i] + 1.f;
}
return true;
}
};
OPERATOR_SCHEMA(IncrementByOne)
.NumInputs(1).NumOutputs(1).AllowInplace({{0, 0}});
REGISTER_CPU_OPERATOR(IncrementByOne, IncrementByOneOp);
REGISTER_HIP_OPERATOR(IncrementByOne, GPUFallbackOp);
TEST(OperatorFallbackTest, IncrementByOneOp) {
OperatorDef op_def = CreateOperatorDef(
"IncrementByOne", "", vector<string>{"X"}, vector<string>{"X"});
Workspace ws;
Tensor source_tensor(vector<int64_t>{2, 3}, CPU);
for (int i = 0; i < 6; ++i) {
source_tensor.mutable_data<float>()[i] = i;
}
BlobGetMutableTensor(ws.CreateBlob("X"), CPU)->CopyFrom(source_tensor);
unique_ptr<OperatorBase> op(CreateOperator(op_def, &ws));
EXPECT_TRUE(op.get() != nullptr);
EXPECT_TRUE(op->Run());
const TensorCPU& output = ws.GetBlob("X")->Get<TensorCPU>();
EXPECT_EQ(output.dim(), 2);
EXPECT_EQ(output.size(0), 2);
EXPECT_EQ(output.size(1), 3);
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(output.data<float>()[i], i + 1);
}
}
TEST(OperatorFallbackTest, GPUIncrementByOneOp) {
if (!HasHipGPU()) return;
OperatorDef op_def = CreateOperatorDef(
"IncrementByOne", "", vector<string>{"X"}, vector<string>{"X"});
op_def.mutable_device_option()->set_device_type(PROTO_HIP);
Workspace ws;
Tensor source_tensor(vector<int64_t>{2, 3}, CPU);
for (int i = 0; i < 6; ++i) {
source_tensor.mutable_data<float>()[i] = i;
}
BlobGetMutableTensor(ws.CreateBlob("X"), HIP)->CopyFrom(source_tensor);
unique_ptr<OperatorBase> op(CreateOperator(op_def, &ws));
EXPECT_TRUE(op.get() != nullptr);
EXPECT_TRUE(op->Run());
const TensorHIP& output = ws.GetBlob("X")->Get<TensorHIP>();
Tensor output_cpu(output, CPU);
EXPECT_EQ(output.dim(), 2);
EXPECT_EQ(output.size(0), 2);
EXPECT_EQ(output.size(1), 3);
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(output_cpu.data<float>()[i], i + 1);
}
}
}
### |
#include <iostream>
#include "caffe2/core/operator.h"
#include "caffe2/operators/operator_fallback_gpu.h"
#include <gtest/gtest.h>
namespace caffe2 {
class IncrementByOneOp final : public Operator<CPUContext> {
public:
template <class... Args>
explicit IncrementByOneOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
const auto& in = Input(0);
auto* out = Output(0, in.sizes(), at::dtype<float>());
const float* in_data = in.template data<float>();
float* out_data = out->template mutable_data<float>();
for (int i = 0; i < in.numel(); ++i) {
out_data[i] = in_data[i] + 1.f;
}
return true;
}
};
OPERATOR_SCHEMA(IncrementByOne)
.NumInputs(1).NumOutputs(1).AllowInplace({{0, 0}});
REGISTER_CPU_OPERATOR(IncrementByOne, IncrementByOneOp);
REGISTER_CUDA_OPERATOR(IncrementByOne, GPUFallbackOp);
TEST(OperatorFallbackTest, IncrementByOneOp) {
OperatorDef op_def = CreateOperatorDef(
"IncrementByOne", "", vector<string>{"X"}, vector<string>{"X"});
Workspace ws;
Tensor source_tensor(vector<int64_t>{2, 3}, CPU);
for (int i = 0; i < 6; ++i) {
source_tensor.mutable_data<float>()[i] = i;
}
BlobGetMutableTensor(ws.CreateBlob("X"), CPU)->CopyFrom(source_tensor);
unique_ptr<OperatorBase> op(CreateOperator(op_def, &ws));
EXPECT_TRUE(op.get() != nullptr);
EXPECT_TRUE(op->Run());
const TensorCPU& output = ws.GetBlob("X")->Get<TensorCPU>();
EXPECT_EQ(output.dim(), 2);
EXPECT_EQ(output.size(0), 2);
EXPECT_EQ(output.size(1), 3);
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(output.data<float>()[i], i + 1);
}
}
TEST(OperatorFallbackTest, GPUIncrementByOneOp) {
if (!HasCudaGPU()) return;
OperatorDef op_def = CreateOperatorDef(
"IncrementByOne", "", vector<string>{"X"}, vector<string>{"X"});
op_def.mutable_device_option()->set_device_type(PROTO_CUDA);
Workspace ws;
Tensor source_tensor(vector<int64_t>{2, 3}, CPU);
for (int i = 0; i < 6; ++i) {
source_tensor.mutable_data<float>()[i] = i;
}
BlobGetMutableTensor(ws.CreateBlob("X"), CUDA)->CopyFrom(source_tensor);
unique_ptr<OperatorBase> op(CreateOperator(op_def, &ws));
EXPECT_TRUE(op.get() != nullptr);
EXPECT_TRUE(op->Run());
const TensorCUDA& output = ws.GetBlob("X")->Get<TensorCUDA>();
Tensor output_cpu(output, CPU);
EXPECT_EQ(output.dim(), 2);
EXPECT_EQ(output.size(0), 2);
EXPECT_EQ(output.size(1), 3);
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(output_cpu.data<float>()[i], i + 1);
}
}
}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
// -----------------------------------
// prelu
// -----------------------------------
void prelu_kernel(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_hip", [&] {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input, scalar_t weight) -> scalar_t {
return (input > 0) ? input : weight * input;
});
});
}
void prelu_backward_kernel(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_backward_hip", [&] {
gpu_kernel_multiple_outputs(iter,
[] GPU_LAMBDA (scalar_t input, scalar_t weight, scalar_t grad) -> thrust::tuple<scalar_t, scalar_t> {
auto mask = input > 0;
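        // For x > 0: d/dx = grad and d/dw = 0; otherwise d/dx = w * grad and d/dw = x * grad.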
auto grad_input = mask ? grad : weight * grad;
auto grad_weight = mask ? scalar_t{0} : input * grad;
return {grad_input, grad_weight};
});
});
}
REGISTER_DISPATCH(prelu_stub, &prelu_kernel);
REGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
// -----------------------------------
// prelu
// -----------------------------------
void prelu_kernel(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_cuda", [&] {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input, scalar_t weight) -> scalar_t {
return (input > 0) ? input : weight * input;
});
});
}
void prelu_backward_kernel(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, iter.dtype(), "prelu_backward_cuda", [&] {
gpu_kernel_multiple_outputs(iter,
[] GPU_LAMBDA (scalar_t input, scalar_t weight, scalar_t grad) -> thrust::tuple<scalar_t, scalar_t> {
auto mask = input > 0;
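        // For x > 0: d/dx = grad and d/dw = 0; otherwise d/dx = w * grad and d/dw = x * grad.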
auto grad_input = mask ? grad : weight * grad;
auto grad_weight = mask ? scalar_t{0} : input * grad;
return {grad_input, grad_weight};
});
});
}
REGISTER_DISPATCH(prelu_stub, &prelu_kernel);
REGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/order_switch_ops.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(NHWC2NCHW, NHWC2NCHWOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(NCHW2NHWC, NCHW2NHWCOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/order_switch_ops.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(NHWC2NCHW, NHWC2NCHWOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(NCHW2NHWC, NCHW2NHWCOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/perplexity_op.h"
#include "caffe2/utils/math.h"
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/hip/execution_policy.h>
namespace caffe2 {
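// Perplexity of the N input probabilities: prod_i (1/p_i)^(1/N), i.e. the
// geometric mean of the inverse probabilities, accumulated via transform_reduce.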
struct perplexity_function
{
perplexity_function(float p) : pow(p) {}
__host__ __device__ float operator()(float x) const
{
return powf(1.0f/x, pow);
}
float pow;
};
template <>
bool PerplexityOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
TORCH_DCHECK_EQ(X.dim(), 1);
int N = X.dim32(0);
auto* Y = Output(0, vector<int64_t>(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
const float* Xdata = X.data<float>();
float perplexity = thrust::transform_reduce(
#if THRUST_VERSION >= 100800
thrust::hip::par.on(context_.hip_stream()),
#endif // THRUST_VERSION >= 100800
Xdata, Xdata + N,
perplexity_function(1.0f/N),
1.0f,
thrust::multiplies<float>());
math::Set<float, HIPContext>(1, perplexity, Ydata, &context_);
return true;
}
REGISTER_HIP_OPERATOR(Perplexity, PerplexityOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/perplexity_op.h"
#include "caffe2/utils/math.h"
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/cuda/execution_policy.h>
namespace caffe2 {
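// Perplexity of the N input probabilities: prod_i (1/p_i)^(1/N), i.e. the
// geometric mean of the inverse probabilities, accumulated via transform_reduce.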
struct perplexity_function
{
perplexity_function(float p) : pow(p) {}
__host__ __device__ float operator()(float x) const
{
return powf(1.0f/x, pow);
}
float pow;
};
template <>
bool PerplexityOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
TORCH_DCHECK_EQ(X.dim(), 1);
int N = X.dim32(0);
auto* Y = Output(0, vector<int64_t>(), at::dtype<float>());
float* Ydata = Y->template mutable_data<float>();
const float* Xdata = X.data<float>();
float perplexity = thrust::transform_reduce(
#if THRUST_VERSION >= 100800
thrust::cuda::par.on(context_.cuda_stream()),
#endif // THRUST_VERSION >= 100800
Xdata, Xdata + N,
perplexity_function(1.0f/N),
1.0f,
thrust::multiplies<float>());
math::Set<float, CUDAContext>(1, perplexity, Ydata, &context_);
return true;
}
REGISTER_CUDA_OPERATOR(Perplexity, PerplexityOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/prepend_dim_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(PrependDim, PrependDimOp<HIPContext>);
REGISTER_HIP_OPERATOR(MergeDim, MergeDimOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/prepend_dim_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(PrependDim, PrependDimOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(MergeDim, MergeDimOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/reciprocal_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
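// y = 1/x, so dy/dx = -1/x^2 = -y^2 and dX = -dY * y^2.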
template <typename T>
__global__ void
ReciprocalGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * (-__ldg(Y + i) * __ldg(Y + i));
#else
dX[i] = dY[i] * (-Y[i] * Y[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool ReciprocalGradientFunctor<HIPContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( ReciprocalGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Reciprocal,
UnaryElementwiseOp<
TensorTypes<float, double>,
HIPContext,
ReciprocalFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
ReciprocalGradient,
BinaryElementwiseOp<
TensorTypes<float, double>,
HIPContext,
ReciprocalGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/reciprocal_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
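// y = 1/x, so dy/dx = -1/x^2 = -y^2 and dX = -dY * y^2.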
template <typename T>
__global__ void
ReciprocalGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * (-__ldg(Y + i) * __ldg(Y + i));
#else
dX[i] = dY[i] * (-Y[i] * Y[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool ReciprocalGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
ReciprocalGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Reciprocal,
UnaryElementwiseOp<
TensorTypes<float, double>,
CUDAContext,
ReciprocalFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
ReciprocalGradient,
BinaryElementwiseOp<
TensorTypes<float, double>,
CUDAContext,
ReciprocalGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/relu_n_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
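// ReluN clamps the activation to [0, threshold]: y = min(max(x, 0), threshold).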
template <typename T>
__global__ void
ReluNHIPKernel(const int N, const T threshold, const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
Y[i] = __ldg(X + i) > 0
? (__ldg(X + i) < threshold ? __ldg(X + i) : threshold)
: T(0);
#else
Y[i] = X[i] > 0 ? (X[i] < threshold ? X[i] : threshold) : T(0);
#endif
}
}
template <typename T>
__global__ void ReluNGradientHIPKernel(
const int N,
const T threshold,
const T* dY,
const T* Y,
T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = (__ldg(Y + i) > 0 && __ldg(Y + i) < threshold) ? dY[i] : T(0);
#else
dX[i] = (Y[i] > 0 && Y[i] < threshold) ? dY[i] : T(0);
#endif
}
}
} // namespace
template <>
template <typename T>
bool ReluNFunctor<HIPContext>::
operator()(const int N, const T* X, T* Y, HIPContext* context) const {
hipLaunchKernelGGL(( ReluNHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), N, n, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool ReluNGradientFunctor<HIPContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( ReluNGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, n, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
ReluN,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
HIPContext,
ReluNFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
ReluNGradient,
BinaryElementwiseWithArgsOp<
TensorTypes<float>,
HIPContext,
ReluNGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/relu_n_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
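// ReluN clamps the activation to [0, threshold]: y = min(max(x, 0), threshold).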
template <typename T>
__global__ void
ReluNCUDAKernel(const int N, const T threshold, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(X + i) > 0
? (__ldg(X + i) < threshold ? __ldg(X + i) : threshold)
: T(0);
#else
Y[i] = X[i] > 0 ? (X[i] < threshold ? X[i] : threshold) : T(0);
#endif
}
}
template <typename T>
__global__ void ReluNGradientCUDAKernel(
const int N,
const T threshold,
const T* dY,
const T* Y,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = (__ldg(Y + i) > 0 && __ldg(Y + i) < threshold) ? dY[i] : T(0);
#else
dX[i] = (Y[i] > 0 && Y[i] < threshold) ? dY[i] : T(0);
#endif
}
}
} // namespace
template <>
template <typename T>
bool ReluNFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
ReluNCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, n, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool ReluNGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
ReluNGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, n, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
ReluN,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
ReluNFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
ReluNGradient,
BinaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
ReluNGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/replace_nan_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
replace_nan_kernel(const T value, const int64_t size, const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, size) {
if (isnan(X[i])) {
Y[i] = value;
} else {
Y[i] = X[i];
}
}
}
} // namespace
template <>
template <typename T>
void ReplaceNaNOp<HIPContext>::ReplaceNaN(
const T& value,
const int64_t size,
const T* X,
T* Y) {
hipLaunchKernelGGL(( replace_nan_kernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(), value, size, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_HIP_OPERATOR(ReplaceNaN, ReplaceNaNOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/replace_nan_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
replace_nan_kernel(const T value, const int64_t size, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, size) {
if (isnan(X[i])) {
Y[i] = value;
} else {
Y[i] = X[i];
}
}
}
} // namespace
template <>
template <typename T>
void ReplaceNaNOp<CUDAContext>::ReplaceNaN(
const T& value,
const int64_t size,
const T* X,
T* Y) {
replace_nan_kernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(value, size, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(ReplaceNaN, ReplaceNaNOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/reshape_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Reshape, ReshapeOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/reshape_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Reshape, ReshapeOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <gtest/gtest.h>
#include "caffe2/core/context.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/core/flags.h"
#include "caffe2/operators/reshape_op.h"
#include "caffe2/utils/math.h"
C10_DECLARE_string(caffe_test_root);
namespace caffe2 {
static void AddConstInput(
const vector<int64_t>& shape,
const float value,
const string& name,
Workspace* ws) {
DeviceOption option;
option.set_device_type(PROTO_HIP);
HIPContext context(option);
Blob* blob = ws->CreateBlob(name);
auto* tensor = BlobGetMutableTensor(blob, HIP);
tensor->Resize(shape);
math::Set<float, HIPContext>(
tensor->numel(), value, tensor->template mutable_data<float>(), &context);
return;
}
TEST(ReshapeOpGPUTest, testReshapeWithScalar) {
if (!HasHipGPU())
return;
Workspace ws;
OperatorDef def;
def.set_name("test_reshape");
def.set_type("Reshape");
def.add_input("X");
def.add_output("XNew");
def.add_output("OldShape");
def.add_arg()->CopyFrom(MakeArgument("shape", vector<int64_t>{1}));
def.mutable_device_option()->set_device_type(PROTO_HIP);
AddConstInput(vector<int64_t>(), 3.14, "X", &ws);
// execute the op
unique_ptr<OperatorBase> op(CreateOperator(def, &ws));
EXPECT_TRUE(op->Run());
Blob* XNew = ws.GetBlob("XNew");
const Tensor& XNewTensor = XNew->Get<Tensor>();
EXPECT_EQ(1, XNewTensor.dim());
EXPECT_EQ(1, XNewTensor.numel());
}
} // namespace caffe2
### |
#include <iostream>
#include <gtest/gtest.h>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/flags.h"
#include "caffe2/operators/reshape_op.h"
#include "caffe2/utils/math.h"
C10_DECLARE_string(caffe_test_root);
namespace caffe2 {
static void AddConstInput(
const vector<int64_t>& shape,
const float value,
const string& name,
Workspace* ws) {
DeviceOption option;
option.set_device_type(PROTO_CUDA);
CUDAContext context(option);
Blob* blob = ws->CreateBlob(name);
auto* tensor = BlobGetMutableTensor(blob, CUDA);
tensor->Resize(shape);
math::Set<float, CUDAContext>(
tensor->numel(), value, tensor->template mutable_data<float>(), &context);
return;
}
TEST(ReshapeOpGPUTest, testReshapeWithScalar) {
if (!HasCudaGPU())
return;
Workspace ws;
OperatorDef def;
def.set_name("test_reshape");
def.set_type("Reshape");
def.add_input("X");
def.add_output("XNew");
def.add_output("OldShape");
def.add_arg()->CopyFrom(MakeArgument("shape", vector<int64_t>{1}));
def.mutable_device_option()->set_device_type(PROTO_CUDA);
AddConstInput(vector<int64_t>(), 3.14, "X", &ws);
// execute the op
unique_ptr<OperatorBase> op(CreateOperator(def, &ws));
EXPECT_TRUE(op->Run());
Blob* XNew = ws.GetBlob("XNew");
const Tensor& XNewTensor = XNew->Get<Tensor>();
EXPECT_EQ(1, XNewTensor.dim());
EXPECT_EQ(1, XNewTensor.numel());
}
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/reverse_packed_segs_op.h"
namespace caffe2 {
namespace {
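// One block per (time step, batch) pair. Entries inside a segment are written to
// the mirrored time index (seg_length - 1 - segment); padding past the segment
// length is copied through unchanged.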
template <typename T, typename LengthType>
__global__
void ReversePackedSegments_kernel(
size_t max_length,
size_t batch_size,
size_t block_size,
const LengthType* lengths_ptr,
const T* data_ptr,
T* rev_data_ptr) {
const int block_id = blockIdx.x;
// index into [0, batch_size)
const int batch = block_id / max_length;
// index into [0, segment)
const int segment = block_id % max_length;
if (batch >= batch_size || segment >= max_length) return;
const int seg_length = lengths_ptr[batch];
// unique data pointer for this CTA
const T* local_data_ptr = data_ptr + (segment * batch_size + batch) * block_size;
// unique pointer for result
T* local_rev_data_ptr;
if (segment < seg_length) {
local_rev_data_ptr = rev_data_ptr + ((seg_length - 1 - segment) * batch_size + batch) * block_size;
} else {
local_rev_data_ptr = rev_data_ptr + (segment * batch_size + batch) * block_size;
}
// copy using 1 element / thread for now
for (int idx = threadIdx.x; idx < block_size; idx+=blockDim.x) {
local_rev_data_ptr[idx] = local_data_ptr[idx];
}
}
} // namespace
// specialization of DoRunWithLengthType
template <>
template <typename T, typename LengthType>
void ReversePackedSegsOp<HIPContext>::DoRunWithLengthType() {
const auto& data = Input(DATA);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE(
data.dim() == 3,
"DATA should be 3-D tensor <lengths, "
"segments, embeddings>");
CAFFE_ENFORCE(lengths.dim() == 1, "LENGTH should be 1-D");
auto* output = Output(0, data.sizes(), at::dtype<T>());
const auto max_length = data.size(0);
const auto batch_size = data.size(1);
const auto block_size = data.size(2);
CAFFE_ENFORCE(
lengths.sizes()[0] == batch_size,
"lenths size should be"
" equal to batch size");
const T* data_ptr = data.template data<T>();
const LengthType* lengths_ptr = lengths.template data<LengthType>();
// reversed data
T* rev_data_ptr = output->template mutable_data<T>();
const int grid = max_length * batch_size;
hipLaunchKernelGGL(( ReversePackedSegments_kernel<T,LengthType>), dim3(grid), dim3(512), 0, context_.hip_stream(),
max_length,
batch_size,
block_size,
lengths_ptr,
data_ptr,
rev_data_ptr);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
REGISTER_HIP_OPERATOR(ReversePackedSegs, ReversePackedSegsOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/reverse_packed_segs_op.h"
namespace caffe2 {
namespace {
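// One block per (time step, batch) pair. Entries inside a segment are written to
// the mirrored time index (seg_length - 1 - segment); padding past the segment
// length is copied through unchanged.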
template <typename T, typename LengthType>
__global__
void ReversePackedSegments_kernel(
size_t max_length,
size_t batch_size,
size_t block_size,
const LengthType* lengths_ptr,
const T* data_ptr,
T* rev_data_ptr) {
const int block_id = blockIdx.x;
// index into [0, batch_size)
const int batch = block_id / max_length;
// index into [0, segment)
const int segment = block_id % max_length;
if (batch >= batch_size || segment >= max_length) return;
const int seg_length = lengths_ptr[batch];
// unique data pointer for this CTA
const T* local_data_ptr = data_ptr + (segment * batch_size + batch) * block_size;
// unique pointer for result
T* local_rev_data_ptr;
if (segment < seg_length) {
local_rev_data_ptr = rev_data_ptr + ((seg_length - 1 - segment) * batch_size + batch) * block_size;
} else {
local_rev_data_ptr = rev_data_ptr + (segment * batch_size + batch) * block_size;
}
// copy using 1 element / thread for now
for (int idx = threadIdx.x; idx < block_size; idx+=blockDim.x) {
local_rev_data_ptr[idx] = local_data_ptr[idx];
}
}
} // namespace
// specialization of DoRunWithLengthType
template <>
template <typename T, typename LengthType>
void ReversePackedSegsOp<CUDAContext>::DoRunWithLengthType() {
const auto& data = Input(DATA);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE(
data.dim() == 3,
"DATA should be 3-D tensor <lengths, "
"segments, embeddings>");
CAFFE_ENFORCE(lengths.dim() == 1, "LENGTH should be 1-D");
auto* output = Output(0, data.sizes(), at::dtype<T>());
const auto max_length = data.size(0);
const auto batch_size = data.size(1);
const auto block_size = data.size(2);
CAFFE_ENFORCE(
lengths.sizes()[0] == batch_size,
"lenths size should be"
" equal to batch size");
const T* data_ptr = data.template data<T>();
const LengthType* lengths_ptr = lengths.template data<LengthType>();
// reversed data
T* rev_data_ptr = output->template mutable_data<T>();
const int grid = max_length * batch_size;
ReversePackedSegments_kernel<T,LengthType><<<grid, 512, 0, context_.cuda_stream()>>>(
max_length,
batch_size,
block_size,
lengths_ptr,
data_ptr,
rev_data_ptr);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
REGISTER_CUDA_OPERATOR(ReversePackedSegs, ReversePackedSegsOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/rsqrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
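// y = x^(-1/2), so dy/dx = -0.5 * x^(-3/2) = -0.5 * y^3 and dX = -0.5 * dY * y^3.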
template <typename T>
__global__ void
RsqrtGradientHIPKernel(const int size, const T* dY, const T* Y, T* dX) {
HIP_1D_KERNEL_LOOP(i, size) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * math::utils::Cube<T>(__ldg(Y + i)) *
static_cast<T>(-0.5);
#else
dX[i] = dY[i] * math::utils::Cube<T>(Y[i]) * static_cast<T>(-0.5);
#endif
}
}
} // namespace
template <>
template <typename T>
bool RsqrtGradientFunctor<HIPContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* Y_dims */,
const T* dY,
const T* Y,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( RsqrtGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Rsqrt,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
RsqrtFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
RsqrtGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
RsqrtGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/rsqrt_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
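// y = x^(-1/2), so dy/dx = -0.5 * x^(-3/2) = -0.5 * y^3 and dX = -0.5 * dY * y^3.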
template <typename T>
__global__ void
RsqrtGradientCUDAKernel(const int size, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, size) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * math::utils::Cube<T>(__ldg(Y + i)) *
static_cast<T>(-0.5);
#else
dX[i] = dY[i] * math::utils::Cube<T>(Y[i]) * static_cast<T>(-0.5);
#endif
}
}
} // namespace
template <>
template <typename T>
bool RsqrtGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& /* Y_dims */,
const T* dY,
const T* Y,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
RsqrtGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Rsqrt,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
RsqrtFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
RsqrtGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
RsqrtGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void silu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t x_acc = static_cast<opmath_t>(x);
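          // silu(x) = x * sigmoid(x) = x / (1 + exp(-x)).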
return x_acc / (opmath_t(1) + c10::hip::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t dy_acc = static_cast<opmath_t>(dy);
const opmath_t x_acc = static_cast<opmath_t>(x);
const opmath_t s_acc =
opmath_t(1) / (opmath_t(1) + c10::hip::compat::exp(-x_acc));
return dy_acc * s_acc * (opmath_t(1) + x_acc * (opmath_t(1) - s_acc));
});
});
}
} // namespace
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void silu_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t x_acc = static_cast<opmath_t>(x);
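          // silu(x) = x * sigmoid(x) = x / (1 + exp(-x)).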
return x_acc / (opmath_t(1) + c10::cuda::compat::exp(-x_acc));
});
});
}
void silu_backward_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"silu_backward_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
const opmath_t dy_acc = static_cast<opmath_t>(dy);
const opmath_t x_acc = static_cast<opmath_t>(x);
const opmath_t s_acc =
opmath_t(1) / (opmath_t(1) + c10::cuda::compat::exp(-x_acc));
return dy_acc * s_acc * (opmath_t(1) + x_acc * (opmath_t(1) - s_acc));
});
});
}
} // namespace
REGISTER_DISPATCH(silu_stub, &silu_kernel);
REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/scale_op.h"
namespace caffe2 {
template <>
bool ScaleOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0));
}
REGISTER_HIP_OPERATOR(Scale, ScaleOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/scale_op.h"
namespace caffe2 {
template <>
bool ScaleOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0));
}
REGISTER_CUDA_OPERATOR(Scale, ScaleOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/selu_op.h"
namespace caffe2 {
namespace {
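// SELU: y = lambda * x for x > 0, and lambda * alpha * (exp(x) - 1) otherwise.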
template <typename T>
__global__ void SeluKernel(const int N, const T* X, T* Y, T alpha_, T lambda_) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = lambda_ * (X[i] > 0 ? X[i] : alpha_ * expf(X[i]) - alpha_);
}
}
template <typename T>
__global__ void SeluGradientKernel(
const int N,
const T* Y,
const T* dY,
T* dX,
T alpha_,
T lambda_) {
const T c = lambda_ * alpha_;
HIP_1D_KERNEL_LOOP(i, N) {
// Reuse Y[i] to avoid computing exp(X[i])
dX[i] = Y[i] > 0 ? lambda_ * dY[i] : dY[i] * (Y[i] + c);
}
}
} // namespace
template <>
bool SeluOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( SeluKernel<float>)
, dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
X.numel(),
X.data<float>(),
Y->template mutable_data<float>(),
alpha_,
lambda_);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool SeluGradientOp<float, HIPContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GT(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( SeluGradientKernel<float>)
, dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>(),
alpha_,
lambda_);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(Selu, SeluOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(SeluGradient, SeluGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/selu_op.h"
namespace caffe2 {
namespace {
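// SELU: y = lambda * x for x > 0, and lambda * alpha * (exp(x) - 1) otherwise.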
template <typename T>
__global__ void SeluKernel(const int N, const T* X, T* Y, T alpha_, T lambda_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = lambda_ * (X[i] > 0 ? X[i] : alpha_ * __expf(X[i]) - alpha_);
}
}
template <typename T>
__global__ void SeluGradientKernel(
const int N,
const T* Y,
const T* dY,
T* dX,
T alpha_,
T lambda_) {
const T c = lambda_ * alpha_;
CUDA_1D_KERNEL_LOOP(i, N) {
// Reuse Y[i] to avoid computing exp(X[i])
dX[i] = Y[i] > 0 ? lambda_ * dY[i] : dY[i] * (Y[i] + c);
}
}
} // namespace
template <>
bool SeluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
SeluKernel<float>
<<<CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(),
X.data<float>(),
Y->template mutable_data<float>(),
alpha_,
lambda_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool SeluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GT(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
SeluGradientKernel<float>
<<<CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>(),
alpha_,
lambda_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Selu, SeluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SeluGradient, SeluGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/shape_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(Shape, ShapeOp<HIPContext>);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/shape_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(Shape, ShapeOp<CUDAContext>);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sigmoid_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SigmoidHIPKernel(const int N, const T* X, T* Y);
template <>
__global__ void
SigmoidHIPKernel<float>(const int N, const float* X, float* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
Y[i] = 1.0f / (1.0f + expf(-__ldg(X + i)));
#else
Y[i] = 1.0f / (1.0f + expf(-X[i]));
#endif
}
}
template <typename T>
__global__ void
SigmoidGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * __ldg(Y + i) * (T(1) - __ldg(Y + i));
#else
dX[i] = dY[i] * Y[i] * (T(1) - Y[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool SigmoidFunctor<HIPContext>::
operator()(const int N, const T* X, T* Y, HIPContext* context) const {
hipLaunchKernelGGL(( SigmoidHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), N, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool SigmoidGradientFunctor<HIPContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SigmoidGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Sigmoid,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SigmoidFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
SigmoidGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SigmoidGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/sigmoid_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SigmoidCUDAKernel(const int N, const T* X, T* Y);
template <>
__global__ void
SigmoidCUDAKernel<float>(const int N, const float* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = 1.0f / (1.0f + expf(-__ldg(X + i)));
#else
Y[i] = 1.0f / (1.0f + expf(-X[i]));
#endif
}
}
template <typename T>
__global__ void
SigmoidGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * __ldg(Y + i) * (T(1) - __ldg(Y + i));
#else
dX[i] = dY[i] * Y[i] * (T(1) - Y[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool SigmoidFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
SigmoidCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool SigmoidGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
SigmoidGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Sigmoid,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SigmoidFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
SigmoidGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SigmoidGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sinh_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void SinhGradientHIPKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * coshf(__ldg(X + i));
#else
dX[i] = dY[i] * coshf(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool SinhGradientFunctor<HIPContext>::Forward(
const std::vector<int>& /* dY_dims */,
const std::vector<int>& X_dims,
const T* dY,
const T* X,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SinhGradientHIPKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Sinh,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SinhFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
SinhGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SinhGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/sinh_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
__global__ void SinhGradientCUDAKernel(
const int N,
const float* dY,
const float* X,
float* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * coshf(__ldg(X + i));
#else
dX[i] = dY[i] * coshf(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool SinhGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& /* dY_dims */,
const std::vector<int>& X_dims,
const T* dY,
const T* X,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
SinhGradientCUDAKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Sinh,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SinhFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
SinhGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SinhGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
SinGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * cos(__ldg(X + i));
#else
dX[i] = dY[i] * cos(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool SinGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SinGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Sin,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SinFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
SinGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SinGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/sin_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
SinGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * cos(__ldg(X + i));
#else
dX[i] = dY[i] * cos(X[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool SinGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
SinGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Sin,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SinFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
SinGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SinGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/softplus_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SoftplusKernel(const int N, const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = log(exp(X[i]) + 1.0f);
}
}
template <typename T>
__global__ void
SoftplusGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
const float nexpY = exp(-Y[i]);
dX[i] = dY[i] * (1 - nexpY);
}
}
} // namespace
template <>
bool SoftplusOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
TORCH_DCHECK_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( SoftplusKernel<float>)
, dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool SoftplusGradientOp<float, HIPContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
TORCH_DCHECK_GT(Y.numel(), 0);
TORCH_DCHECK_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( SoftplusGradientKernel<float>)
, dim3(CAFFE_GET_BLOCKS(Y.numel())),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context_.hip_stream(),
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(Softplus, SoftplusOp<float, HIPContext>);
REGISTER_HIP_OPERATOR(
SoftplusGradient,
SoftplusGradientOp<float, HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softplus_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SoftplusKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = log(exp(X[i]) + 1.0f);
}
}
template <typename T>
__global__ void
SoftplusGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float nexpY = exp(-Y[i]);
dX[i] = dY[i] * (1 - nexpY);
}
}
} // namespace
template <>
bool SoftplusOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
TORCH_DCHECK_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
SoftplusKernel<float>
<<<CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(), X.data<float>(), Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool SoftplusGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
TORCH_DCHECK_GT(Y.numel(), 0);
TORCH_DCHECK_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
SoftplusGradientKernel<float>
<<<CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Softplus, SoftplusOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SoftplusGradient,
SoftplusGradientOp<float, CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/softsign_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
using c10::hip::compat::abs;
template <typename T>
inline __host__ __device__ T SquareHIP(const T x) {
return x * x;
}
template <typename T>
__global__ void SoftsignHIPKernel(const int N, const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
Y[i] = __ldg(X + i) / (T(1) + abs(__ldg(X + i)));
#else
Y[i] = X[i] / (T(1) + abs(X[i]));
#endif
}
}
template <typename T>
__global__ void
SoftsignGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) / SquareHIP(T(1) + abs(__ldg(X + i)));
#else
dX[i] = dY[i] / SquareHIP(T(1) + abs(X[i]));
#endif
}
}
} // namespace
template <>
template <typename T>
bool SoftsignFunctor<HIPContext>::
operator()(const int N, const T* X, T* Y, HIPContext* context) const {
hipLaunchKernelGGL(( SoftsignHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), N, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool SoftsignGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SoftsignGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Softsign,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SoftsignFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
SoftsignGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SoftsignGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/softsign_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
using c10::cuda::compat::abs;
template <typename T>
inline __host__ __device__ T SquareCUDA(const T x) {
return x * x;
}
template <typename T>
__global__ void SoftsignCUDAKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(X + i) / (T(1) + abs(__ldg(X + i)));
#else
Y[i] = X[i] / (T(1) + abs(X[i]));
#endif
}
}
template <typename T>
__global__ void
SoftsignGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) / SquareCUDA(T(1) + abs(__ldg(X + i)));
#else
dX[i] = dY[i] / SquareCUDA(T(1) + abs(X[i]));
#endif
}
}
} // namespace
template <>
template <typename T>
bool SoftsignFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
SoftsignCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool SoftsignGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
SoftsignGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Softsign,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SoftsignFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
SoftsignGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SoftsignGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/hip/operator_fallback_gpu.h"
#include "caffe2/operators/sparse_lp_regularizer_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(SparseLpRegularizer, GPUFallbackOp);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/operator_fallback_gpu.h"
#include "caffe2/operators/sparse_lp_regularizer_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(SparseLpRegularizer, GPUFallbackOp);
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/hip/operator_fallback_gpu.h"
#include "caffe2/operators/sparse_normalize_op.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
SparseNormalize,
GPUFallbackOp);
}
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/operator_fallback_gpu.h"
#include "caffe2/operators/sparse_normalize_op.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
SparseNormalize,
GPUFallbackOp);
}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void softplus_kernel(
TensorIteratorBase& iter,
const Scalar& beta_,
const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"softplus_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto beta = beta_.to<opmath_t>();
auto threshold = threshold_.to<opmath_t>();
gpu_kernel(iter, [beta, threshold] GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
return (aop * beta) > threshold
? aop
: (::log1p(::exp(aop * beta))) / beta;
});
});
}
void softplus_backward_kernel(
TensorIteratorBase& iter,
const Scalar& beta_,
const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"softplus_backward_hip",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto beta = beta_.to<opmath_t>();
auto threshold = threshold_.to<opmath_t>();
gpu_kernel(
iter,
[beta, threshold] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
opmath_t z = ::exp(bop * beta);
return (bop * beta) > threshold ? aop
: aop * z / (z + opmath_t(1.));
});
});
}
} // namespace
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void softplus_kernel(
TensorIteratorBase& iter,
const Scalar& beta_,
const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"softplus_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto beta = beta_.to<opmath_t>();
auto threshold = threshold_.to<opmath_t>();
gpu_kernel(iter, [beta, threshold] GPU_LAMBDA(scalar_t a) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
return (aop * beta) > threshold
? aop
: (::log1p(std::exp(aop * beta))) / beta;
});
});
}
void softplus_backward_kernel(
TensorIteratorBase& iter,
const Scalar& beta_,
const Scalar& threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"softplus_backward_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
auto beta = beta_.to<opmath_t>();
auto threshold = threshold_.to<opmath_t>();
gpu_kernel(
iter,
[beta, threshold] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
opmath_t aop = static_cast<opmath_t>(a);
opmath_t bop = static_cast<opmath_t>(b);
opmath_t z = std::exp(bop * beta);
return (bop * beta) > threshold ? aop
: aop * z / (z + opmath_t(1.));
});
});
}
} // namespace
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/hip/common_gpu.h"
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/utils/hip/GpuAtomics.cuh"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {
HIP_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
gpu_atomic_add(&dst[dst_idx], vals[i]);
}
}
template <>
bool SparseToDenseOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<HIPContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<HIPContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.dim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.sizes().vec();
shape[0] = output_first_dim;
auto* output = Output(0, shape, at::dtype<TData>());
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->numel(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);
hipLaunchKernelGGL(( SparseToDenseKernel<TInd, TData>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS), 0,
context_.hip_stream(),
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(SparseToDense, SparseToDenseOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/sparse_to_dense_op.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/GpuAtomics.cuh"
namespace caffe2 {
template <typename TInd, typename TData>
__global__ void SparseToDenseKernel(
size_t N, int64_t block_nitems, const TInd* indices, const TData* vals, TData* dst) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = indices[i / block_nitems];
int dst_idx = block_nitems * idx + i % block_nitems;
gpu_atomic_add(&dst[dst_idx], vals[i]);
}
}
template <>
bool SparseToDenseOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t>>::call(
this, Input(INDICES));
}
template <>
template <typename TInd>
bool SparseToDenseOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<
float,
int32_t>,
TInd>::call(this, Input(VALUES));
}
template <>
template <typename TInd, typename TData>
bool SparseToDenseOp<CUDAContext>::DoRunWithType2() {
auto& sparse_indices = Input(INDICES);
CAFFE_ENFORCE_EQ(sparse_indices.dim(), 1);
auto& sparse_values = Input(VALUES);
CAFFE_ENFORCE_GE(sparse_values.dim(), 1);
CAFFE_ENFORCE_EQ(sparse_indices.numel(), sparse_values.dim(0));
const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();
const int32_t sparse_indices_len = sparse_indices.dim32(0);
const int output_first_dim =
GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);
auto shape = sparse_values.sizes().vec();
shape[0] = output_first_dim;
auto* output = Output(0, shape, at::dtype<TData>());
TData* output_data = output->template mutable_data<TData>();
math::Set<TData>(output->numel(), TData(0), output_data, &context_);
const auto block_nitems = sparse_values.size_from_dim(1);
const TData* sparse_values_vec = sparse_values.template data<TData>();
size_t N = block_nitems * sparse_indices_len;
CAFFE_ENFORCE_EQ(output->numel(), output_first_dim * block_nitems);
SparseToDenseKernel<TInd, TData><<<
CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N,
block_nitems,
sparse_indices_vec,
sparse_values_vec,
output_data
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(SparseToDense, SparseToDenseOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/spatial_batch_norm_op.h"
#include "caffe2/operators/hip/spatial_batch_norm_op_impl.cuh"
namespace caffe2 {
REGISTER_HIP_OPERATOR(SpatialBN, SpatialBNOp<HIPContext>);
REGISTER_HIP_OPERATOR(SpatialBNGradient, SpatialBNGradientOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/operators/spatial_batch_norm_op.h"
#include "caffe2/operators/spatial_batch_norm_op_impl.cuh"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(SpatialBN, SpatialBNOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(SpatialBNGradient, SpatialBNGradientOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/sqrt_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Sqrt,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SqrtFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/sqrt_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Sqrt,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SqrtFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/operators/sqr_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(
Sqr,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
SqrFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/sqr_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(
Sqr,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
SqrFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/stop_gradient.h"
namespace caffe2 {
REGISTER_HIP_OPERATOR(StopGradient, StopGradientOp<HIPContext>);
} // namespace caffe2
### |
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/stop_gradient.h"
namespace caffe2 {
REGISTER_CUDA_OPERATOR(StopGradient, StopGradientOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/hip/context_gpu.h"
#include "caffe2/operators/stump_func_op.h"
namespace caffe2 {
namespace {
template <typename TIN, typename TOUT>
__global__ void StumpFuncKernel(
const int N,
const TIN threshold,
const TOUT low_value,
const TOUT high_value,
const TIN* X,
TOUT* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
Y[i] = (X[i] <= threshold) ? low_value : high_value;
}
}
} //
template <>
bool StumpFuncOp<float, float, HIPContext>::RunOnDevice() {
auto& in = Input(0);
const float* in_data = in.data<float>();
auto* out = Output(0, in.sizes(), at::dtype<float>());
float* out_data = out->template mutable_data<float>();
hipLaunchKernelGGL(( StumpFuncKernel), dim3(CAFFE_GET_BLOCKS(in.numel())), dim3(CAFFE_HIP_NUM_THREADS),
0, context_.hip_stream(),
in.numel(), threshold_, low_value_, high_value_, in_data, out_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(StumpFunc, StumpFuncOp<float, float, HIPContext>);
// NO_GRADIENT(StumpFuncGpu);
} // caffe2
### |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/stump_func_op.h"
namespace caffe2 {
namespace {
template <typename TIN, typename TOUT>
__global__ void StumpFuncKernel(
const int N,
const TIN threshold,
const TOUT low_value,
const TOUT high_value,
const TIN* X,
TOUT* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = (X[i] <= threshold) ? low_value : high_value;
}
}
} //
template <>
bool StumpFuncOp<float, float, CUDAContext>::RunOnDevice() {
auto& in = Input(0);
const float* in_data = in.data<float>();
auto* out = Output(0, in.sizes(), at::dtype<float>());
float* out_data = out->template mutable_data<float>();
StumpFuncKernel<<<CAFFE_GET_BLOCKS(in.numel()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
in.numel(), threshold_, low_value_, high_value_, in_data, out_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(StumpFunc, StumpFuncOp<float, float, CUDAContext>);
// NO_GRADIENT(StumpFuncGpu);
} // caffe2
### |
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/hip/execution_policy.h>
#include "caffe2/operators/summarize_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
struct SummaryStatsData {
T n;
T min;
T max;
T mean;
T M2;
void initialize() {
n = mean = M2 = 0;
min = std::numeric_limits<T>::max();
max = std::numeric_limits<T>::min();
}
T variance() { return (n == 1 ? 0 : M2 / (n - 1)); }
};
template <typename T>
struct summary_stats_unary_op {
__host__ __device__ SummaryStatsData<T> operator()(const T& x) const {
SummaryStatsData<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
return result;
}
};
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const SummaryStatsData<T>&, const SummaryStatsData<T>&, SummaryStatsData<T> > {
__host__ __device__ SummaryStatsData<T> operator()(
const SummaryStatsData<T>& x, const SummaryStatsData <T>& y) const {
SummaryStatsData<T> result;
T n = x.n + y.n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
return result;
}
};
}
template<>
bool SummarizeOp<float, HIPContext>::RunOnDevice() {
auto& X = Input(0);
const int N = X.numel();
TORCH_DCHECK_GT(N, 0);
thrust::device_ptr<float> Xdata(const_cast<float*>(X.data<float>()));
summary_stats_unary_op<float> unary_op;
summary_stats_binary_op<float> binary_op;
SummaryStatsData<float> init;
init.initialize();
SummaryStatsData<float> result = thrust::transform_reduce(
#if THRUST_VERSION >= 100800
      thrust::hip::par.on(context_.hip_stream()),
#endif
Xdata, Xdata + N, unary_op, init, binary_op);
float standard_deviation = std::sqrt(result.variance());
if (to_file_) {
(*log_file_) << result.min << " " << result.max << " " << result.mean << " "
<< standard_deviation << std::endl;
}
if (OutputSize()) {
auto* Y = Output(0, {4}, at::dtype<float>());
float output_buffer[NUM_STATS] = {result.min, result.max, result.mean, standard_deviation};
context_.CopyFromCPU<float>(
NUM_STATS, output_buffer, Y->template mutable_data<float>());
}
return true;
}
REGISTER_HIP_OPERATOR(Summarize, SummarizeOp<float, HIPContext>);
}
### |
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include "caffe2/operators/summarize_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
struct SummaryStatsData {
T n;
T min;
T max;
T mean;
T M2;
void initialize() {
n = mean = M2 = 0;
min = std::numeric_limits<T>::max();
max = std::numeric_limits<T>::min();
}
T variance() { return (n == 1 ? 0 : M2 / (n - 1)); }
};
template <typename T>
struct summary_stats_unary_op {
__host__ __device__ SummaryStatsData<T> operator()(const T& x) const {
SummaryStatsData<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
return result;
}
};
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const SummaryStatsData<T>&, const SummaryStatsData<T>&, SummaryStatsData<T> > {
__host__ __device__ SummaryStatsData<T> operator()(
const SummaryStatsData<T>& x, const SummaryStatsData <T>& y) const {
SummaryStatsData<T> result;
T n = x.n + y.n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
return result;
}
};
}
template<>
bool SummarizeOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
const int N = X.numel();
TORCH_DCHECK_GT(N, 0);
thrust::device_ptr<float> Xdata(const_cast<float*>(X.data<float>()));
summary_stats_unary_op<float> unary_op;
summary_stats_binary_op<float> binary_op;
SummaryStatsData<float> init;
init.initialize();
SummaryStatsData<float> result = thrust::transform_reduce(
#if THRUST_VERSION >= 100800
      thrust::cuda::par.on(context_.cuda_stream()),
#endif
Xdata, Xdata + N, unary_op, init, binary_op);
float standard_deviation = std::sqrt(result.variance());
if (to_file_) {
(*log_file_) << result.min << " " << result.max << " " << result.mean << " "
<< standard_deviation << std::endl;
}
if (OutputSize()) {
auto* Y = Output(0, {4}, at::dtype<float>());
float output_buffer[NUM_STATS] = {result.min, result.max, result.mean, standard_deviation};
context_.CopyFromCPU<float>(
NUM_STATS, output_buffer, Y->template mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(Summarize, SummarizeOp<float, CUDAContext>);
}
### |
#include "hip/hip_runtime.h"
#include "caffe2/operators/swish_op.h"
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SwishHIPKernel(const int N, const T* X, T* Y) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));
#else
Y[i] = X[i] / (T(1) + exp(-X[i]));
#endif
}
}
template <typename T>
__global__ void SwishGradientHIPKernel(
const int N, const T* X, const T* Y, const T* dY, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) *
(__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));
#else
dX[i] = dY[i] * (Y[i] + (T(1) - Y[i]) / (T(1) + exp(-X[i])));
#endif
}
}
}
template <>
template <typename T>
bool SwishFunctor<HIPContext>::
operator()(const int N, const T* X, T* Y, HIPContext* context) const {
hipLaunchKernelGGL(( SwishHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_HIP_NUM_THREADS), 0, context->hip_stream(), N, X, Y);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool SwishGradientOp<HIPContext>::DoRunWithType() {
auto& Xin = Input(X);
auto& Yin = Input(Y);
auto& DYin = Input(DY);
auto* DXout = Output(DX);
CAFFE_ENFORCE_EQ(Xin.size(), Yin.size());
CAFFE_ENFORCE_EQ(DYin.size(), Yin.size());
DXout->ResizeLike(Yin);
const int n = Xin.size();
const T* x = Xin.template data<T>();
const T* y = Yin.template data<T>();
const T* dy = DYin.template data<T>();
T* dx = DXout->template mutable_data<T>();
hipLaunchKernelGGL(( SwishGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_HIP_NUM_THREADS), 0, context_.hip_stream(), n, x, y, dy, dx);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool SwishGradientOp<HIPContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(X));
}
REGISTER_HIP_OPERATOR(
Swish, UnaryElementwiseOp<
TensorTypes<float, double>, HIPContext, SwishFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(SwishGradient, SwishGradientOp<HIPContext>);
}
### |
#include "caffe2/operators/swish_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SwishCUDAKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));
#else
Y[i] = X[i] / (T(1) + exp(-X[i]));
#endif
}
}
template <typename T>
__global__ void SwishGradientCUDAKernel(
const int N,
const T* X,
const T* Y,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) *
(__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));
#else
dX[i] = dY[i] * (Y[i] + (T(1) - Y[i]) / (T(1) + exp(-X[i])));
#endif
}
}
} // namespace
template <>
template <typename T>
bool SwishFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
SwishCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, X, Y);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
template <typename T>
bool SwishGradientOp<CUDAContext>::DoRunWithType() {
auto& Xin = Input(X);
auto& Yin = Input(Y);
auto& DYin = Input(DY);
auto* DXout = Output(DX);
CAFFE_ENFORCE_EQ(Xin.size(), Yin.size());
CAFFE_ENFORCE_EQ(DYin.size(), Yin.size());
DXout->ResizeLike(Yin);
const int n = Xin.size();
const T* x = Xin.template data<T>();
const T* y = Yin.template data<T>();
const T* dy = DYin.template data<T>();
T* dx = DXout->template mutable_data<T>();
SwishGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(n, x, y, dy, dx);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool SwishGradientOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(X));
}
REGISTER_CUDA_OPERATOR(
Swish,
UnaryElementwiseOp<
TensorTypes<float, double>,
CUDAContext,
SwishFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(SwishGradient, SwishGradientOp<CUDAContext>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/tanh_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
TanhGradientHIPKernel(const int N, const T* dY, const T* Y, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) * (T(1) - __ldg(Y + i) * __ldg(Y + i));
#else
dX[i] = dY[i] * (T(1) - Y[i] * Y[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool TanhGradientFunctor<HIPContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( TanhGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, Y, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Tanh,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
TanhFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
TanhGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
TanhGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/tanh_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void
TanhGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) * (T(1) - __ldg(Y + i) * __ldg(Y + i));
#else
dX[i] = dY[i] * (T(1) - Y[i] * Y[i]);
#endif
}
}
} // namespace
template <>
template <typename T>
bool TanhGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& /* dY_dims */,
const T* Y,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
TanhGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, Y, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Tanh,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
TanhFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
TanhGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
TanhGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/tan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/hip/context_gpu.h"
namespace caffe2 {
template <typename T>
inline __host__ __device__ T Square(const T& x) {
return x * x;
}
template <typename T>
__global__ void
TanGradientHIPKernel(const int N, const T* dY, const T* X, T* dX) {
HIP_1D_KERNEL_LOOP(i, N) {
#if __HIP_ARCH__ >= 350
dX[i] = __ldg(dY + i) / Square(cos(__ldg(X + i)));
#else
dX[i] = dY[i] / Square(cos(X[i]));
#endif
}
}
template <>
template <typename T>
bool TanGradientFunctor<HIPContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
HIPContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( TanGradientHIPKernel<T>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_HIP_NUM_THREADS),
0,
context->hip_stream(), size, dY, X, dX);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_HIP_OPERATOR(
Tan,
UnaryElementwiseOp<
TensorTypes<float>,
HIPContext,
TanFunctor<HIPContext>>);
REGISTER_HIP_OPERATOR(
TanGradient,
BinaryElementwiseOp<
TensorTypes<float>,
HIPContext,
TanGradientFunctor<HIPContext>>);
} // namespace caffe2
### |
#include "caffe2/operators/tan_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
template <typename T>
inline __host__ __device__ T Square(const T& x) {
return x * x;
}
template <typename T>
__global__ void
TanGradientCUDAKernel(const int N, const T* dY, const T* X, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) / Square(cos(__ldg(X + i)));
#else
dX[i] = dY[i] / Square(cos(X[i]));
#endif
}
}
template <>
template <typename T>
bool TanGradientFunctor<CUDAContext>::Forward(
const std::vector<int>& X_dims,
const std::vector<int>& /* dY_dims */,
const T* X,
const T* dY,
T* dX,
CUDAContext* context) const {
const int size = std::accumulate(
X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
TanGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, dY, X, dX);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
Tan,
UnaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
TanFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
TanGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
TanGradientFunctor<CUDAContext>>);
} // namespace caffe2
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"softshrink_hip",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"shrink_backward_hip",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(
iter,
[lambd] GPU_LAMBDA(
scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0)
: grad_val;
});
});
}
} // namespace
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"softshrink_cuda",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
}
void shrink_backward_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"shrink_backward_cuda",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(
iter,
[lambd] GPU_LAMBDA(
scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0)
: grad_val;
});
});
}
} // namespace
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
} // namespace at::native
### |