hip | cuda
---|---
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/math/binary_elementwise_ops.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/shared_inc/fast_divmod.h"
#include "core/providers/cpu/tensor/utils.h"
using namespace onnxruntime::rocm;
namespace onnxruntime {
namespace contrib {
namespace rocm {
// BiasGelu fuses Add + Gelu
template <typename T>
class BiasGelu final : public BinaryElementwise<ShouldBroadcast> {
public:
BiasGelu(const OpKernelInfo& info) : BinaryElementwise(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/math/binary_elementwise_ops.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/fast_divmod.h"
#include "core/providers/cpu/tensor/utils.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// BiasGelu fuses Add + Gelu
template <typename T>
class BiasGelu final : public BinaryElementwise<ShouldBroadcast> {
public:
BiasGelu(const OpKernelInfo& info) : BinaryElementwise(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
using namespace onnxruntime::rocm;
namespace onnxruntime {
namespace contrib {
namespace rocm {
// These macros simplify coding. To add a new op, follow these steps (see the illustrative example after the macro below):
// 1. Add a new entry in CONTRIB_BINARY_OPS() list
// 2. (optional) Define templated single element operator in binary_elementwise_ops_impl.cu
// 3. (optional) Implement specialized single element operator
// 4. Add op kernel class definition in binary_elementwise_ops.h
// 5. Add op kernel registration and compute specialization in binary_elementwise_ops.cc
#define CONTRIB_BINARY_OPS() \
CONTRIB_BINARY_OP_NAME_EXPR(BiasGelu, _Gelu(a + b))
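// For illustration only (BiasAdd is a hypothetical op, not part of this file): step 1 for a
// second entry would extend the list above to
//   #define CONTRIB_BINARY_OPS()                            \
//     CONTRIB_BINARY_OP_NAME_EXPR(BiasGelu, _Gelu(a + b))   \
//     CONTRIB_BINARY_OP_NAME_EXPR(BiasAdd, a + b)
// and step 2 would define the matching single-element operator in binary_elementwise_ops_impl.cu.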
// NOTE that .cu files are compiled with hipcc here and should not refer to any onnxruntime headers
// so struct BinaryElementwisePreparation cannot be used here
#define CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \
template <typename T> \
void Impl_##name( \
hipStream_t stream, \
int32_t output_rank_or_simple_broadcast, \
const TArray<int64_t>* lhs_padded_strides, \
const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, \
const T* rhs_data, \
const TArray<onnxruntime::rocm::fast_divmod>* fdm_output_strides, \
const onnxruntime::rocm::fast_divmod& fdm_H, \
const onnxruntime::rocm::fast_divmod& fdm_C, \
T* output_data, \
size_t count)
#define CONTRIB_BINARY_OP_NAME_EXPR(name, expr) CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name);
CONTRIB_BINARY_OPS()
#undef CONTRIB_BINARY_OP_NAME_EXPR
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// These macros simplify coding. To add a new op, follow these steps:
// 1. Add a new entry in CONTRIB_BINARY_OPS() list
// 2. (optional) Define templated single element operator in binary_elementwise_ops_impl.cu
// 3. (optional) Implement specialized single element operator
// 4. Add op kernel class definition in binary_elementwise_ops.h
// 5. Add op kernel registration and compute specialization in binary_elementwise_ops.cc
#define CONTRIB_BINARY_OPS() \
CONTRIB_BINARY_OP_NAME_EXPR(BiasGelu, _Gelu(a + b))
// NOTE that cu files are compiled with nvcc and should not refer to any onnxruntime headers
// so struct BinaryElementwisePreparation cannot be used here
#define CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \
template <typename T> \
void Impl_##name( \
cudaStream_t stream, \
int32_t output_rank_or_simple_broadcast, \
const TArray<int64_t>* lhs_padded_strides, \
const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, \
const T* rhs_data, \
const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, \
const onnxruntime::cuda::fast_divmod& fdm_H, \
const onnxruntime::cuda::fast_divmod& fdm_C, \
T* output_data, \
size_t count)
#define CONTRIB_BINARY_OP_NAME_EXPR(name, expr) CONTRIB_BINARY_ELEMENTWISE_IMPL_DECLARATION(name);
CONTRIB_BINARY_OPS()
#undef CONTRIB_BINARY_OP_NAME_EXPR
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/nn/dropout.h"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace contrib {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(BitmaskDropout, kMSDomain, 1, kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.TypeConstraint("T1", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.TypeConstraint("T2", DataTypeImpl::GetTensorType<bool>())
.TypeConstraint("T3", DataTypeImpl::GetTensorType<onnxruntime::rocm::BitmaskElementType>())
.InputMemoryType(OrtMemTypeCPUInput, 1)
.InputMemoryType(OrtMemTypeCPUInput, 2),
onnxruntime::rocm::Dropout<true>);
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/nn/dropout.h"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(BitmaskDropout, kMSDomain, 1, kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.TypeConstraint("T1", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.TypeConstraint("T2", DataTypeImpl::GetTensorType<bool>())
.TypeConstraint("T3", DataTypeImpl::GetTensorType<onnxruntime::cuda::BitmaskElementType>())
.InputMemoryType(OrtMemTypeCPUInput, 1)
.InputMemoryType(OrtMemTypeCPUInput, 2),
onnxruntime::cuda::Dropout<true>);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/math/matmul.h"
namespace onnxruntime {
namespace contrib {
namespace rocm {
#define REGISTER_KERNEL_TYPED(op_name, T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
op_name, \
kMSDomain, \
1, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
onnxruntime::rocm::MatMul<T>);
// TransposeMatMul is kept here for backward compatibility
REGISTER_KERNEL_TYPED(TransposeMatMul, float)
REGISTER_KERNEL_TYPED(TransposeMatMul, double)
REGISTER_KERNEL_TYPED(TransposeMatMul, MLFloat16)
REGISTER_KERNEL_TYPED(TransposeMatMul, BFloat16)
REGISTER_KERNEL_TYPED(FusedMatMul, float)
REGISTER_KERNEL_TYPED(FusedMatMul, double)
REGISTER_KERNEL_TYPED(FusedMatMul, MLFloat16)
REGISTER_KERNEL_TYPED(FusedMatMul, BFloat16)
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/math/matmul.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
#define REGISTER_KERNEL_TYPED(op_name, T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
op_name, \
kMSDomain, \
1, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
onnxruntime::cuda::MatMul<T>);
// TransposeMatMul is kept here for backward compatibility
REGISTER_KERNEL_TYPED(TransposeMatMul, float)
REGISTER_KERNEL_TYPED(TransposeMatMul, double)
REGISTER_KERNEL_TYPED(TransposeMatMul, MLFloat16)
REGISTER_KERNEL_TYPED(TransposeMatMul, BFloat16)
REGISTER_KERNEL_TYPED(FusedMatMul, float)
REGISTER_KERNEL_TYPED(FusedMatMul, double)
REGISTER_KERNEL_TYPED(FusedMatMul, MLFloat16)
REGISTER_KERNEL_TYPED(FusedMatMul, BFloat16)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/rocm/math/isfinite.h"
#include "isfinite_impl.h"
using namespace ONNX_NAMESPACE;
using namespace onnxruntime::common;
namespace onnxruntime {
namespace rocm {
#define REGISTER_ISALLFINITE_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
IsAllFinite, \
kMSDomain, \
1, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("V", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<bool>()), \
IsAllFiniteOp<T>);
template <typename TSrc>
Status IsAllFiniteOp<TSrc>::ComputeInternal(OpKernelContext* context) const {
typedef typename ToHipType<TSrc>::MappedType TSrcCuda;
// Get Input tensor count.
const auto total_tensor_count = context->InputCount();
// Initialize the output to true. GPU kernel will set it
// to false if any value in any tensor is non-finite.
Tensor& output = *context->Output(0, {});
auto* output_data = reinterpret_cast<ToHipType<bool>::MappedType*>(output.MutableData<bool>());
HIP_RETURN_IF_ERROR(hipMemsetAsync(output_data, int(true), sizeof(bool), Stream(context)));
std::vector<std::vector<void*>> grouped_tensor_pointers(total_tensor_count);
std::vector<int> tensor_sizes(total_tensor_count);
for (int i = 0; i < total_tensor_count; ++i) {
const auto& input = context->Input<Tensor>(i);
grouped_tensor_pointers[i] = {const_cast<TSrc*>(input->Data<TSrc>())};
tensor_sizes[i] = static_cast<int>(input->Shape().Size());
}
typedef IsAllFiniteFunctor<TSrcCuda> TFunctor;
TFunctor functor;
// Launch the check over all tensors: the output stays true if every value passes;
// the kernel writes false if any value fails.
launch_multi_tensor_functor<1, TFunctor>(
Stream(context), 2048 * 32, tensor_sizes, grouped_tensor_pointers, functor, output_data, isinf_only_, isnan_only_);
return Status::OK();
}
REGISTER_ISALLFINITE_KERNEL_TYPED(MLFloat16)
REGISTER_ISALLFINITE_KERNEL_TYPED(float)
REGISTER_ISALLFINITE_KERNEL_TYPED(double)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "contrib_ops/cuda/math/isfinite.h"
#include "isfinite_impl.h"
using namespace ONNX_NAMESPACE;
using namespace onnxruntime::common;
namespace onnxruntime {
namespace cuda {
#define REGISTER_ISALLFINITE_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
IsAllFinite, \
kMSDomain, \
1, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("V", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<bool>()), \
IsAllFiniteOp<T>);
template <typename TSrc>
Status IsAllFiniteOp<TSrc>::ComputeInternal(OpKernelContext* context) const {
typedef typename ToCudaType<TSrc>::MappedType TSrcCuda;
// Get Input tensor count.
const auto total_tensor_count = context->InputCount();
// Initialize the output to true. GPU kernel will set it
// to false if any value in any tensor is non-finite.
Tensor& output = *context->Output(0, {});
auto* output_data = reinterpret_cast<ToCudaType<bool>::MappedType*>(output.MutableData<bool>());
CUDA_RETURN_IF_ERROR(cudaMemsetAsync(output_data, int(true), sizeof(bool), Stream(context)));
std::vector<std::vector<void*>> grouped_tensor_pointers(total_tensor_count);
std::vector<int> tensor_sizes(total_tensor_count);
for (int i = 0; i < total_tensor_count; ++i) {
const auto& input = context->Input<Tensor>(i);
grouped_tensor_pointers[i] = {const_cast<TSrc*>(input->Data<TSrc>())};
tensor_sizes[i] = static_cast<int>(input->Shape().Size());
}
typedef IsAllFiniteFunctor<TSrcCuda> TFunctor;
TFunctor functor;
// Launch the check over all tensors: the output stays true if every value passes;
// the kernel writes false if any value fails.
launch_multi_tensor_functor<1, TFunctor>(
Stream(context), 2048 * 32, tensor_sizes, grouped_tensor_pointers, functor, output_data, isinf_only_, isnan_only_);
return Status::OK();
}
REGISTER_ISALLFINITE_KERNEL_TYPED(MLFloat16)
REGISTER_ISALLFINITE_KERNEL_TYPED(float)
REGISTER_ISALLFINITE_KERNEL_TYPED(double)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_fp16.h>
#include "core/providers/rocm/cu_inc/common.cuh"
#include "contrib_ops/rocm/math/isfinite.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
__device__ __forceinline__ bool IsFiniteScalar(const T value) {
return isfinite(value);
}
template <typename T>
__device__ __forceinline__ bool IsInfScalar(const T value) {
return isinf(value);
}
template <typename T>
__device__ __forceinline__ bool IsNaNScalar(const T value) {
return isnan(value);
}
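// Note: the __CUDA_ARCH__ guards below are carried over from the CUDA version of this header;
// HIP does not define __CUDA_ARCH__, so the intrinsic (__hisinf/__hisnan) branch is the one compiled here.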
template <>
__device__ __forceinline__ bool IsFiniteScalar(const half value) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
return !__hisinf(value) && !__hisnan(value);
#else
return isfinite(float(value));
#endif
}
template <>
__device__ __forceinline__ bool IsInfScalar(const half value) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
return __hisinf(value);
#else
return isinf(float(value));
#endif
}
template <>
__device__ __forceinline__ bool IsNaNScalar(const half value) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
return __hisnan(value);
#else
return isnan(float(value));
#endif
}
template <>
__device__ __forceinline__ bool IsFiniteScalar(const BFloat16 value) {
return isfinite(static_cast<float>(value));
}
template <>
__device__ __forceinline__ bool IsInfScalar(const BFloat16 value) {
return isinf(static_cast<float>(value));
}
template <>
__device__ __forceinline__ bool IsNaNScalar(const BFloat16 value) {
return isnan(static_cast<float>(value));
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/math/isfinite.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__device__ __forceinline__ bool IsFiniteScalar(const T value) {
return isfinite(value);
}
template <typename T>
__device__ __forceinline__ bool IsInfScalar(const T value) {
return isinf(value);
}
template <typename T>
__device__ __forceinline__ bool IsNaNScalar(const T value) {
return isnan(value);
}
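// Note: __hisinf/__hisnan are half-precision intrinsics that require compute capability 5.3+,
// hence the __CUDA_ARCH__ >= 530 guard; the !defined(__CUDA_ARCH__) case covers the host
// compilation pass, and older device architectures fall back to the float overloads.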
template <>
__device__ __forceinline__ bool IsFiniteScalar(const half value) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
return !__hisinf(value) && !__hisnan(value);
#else
return isfinite(float(value));
#endif
}
template <>
__device__ __forceinline__ bool IsInfScalar(const half value) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
return __hisinf(value);
#else
return isinf(float(value));
#endif
}
template <>
__device__ __forceinline__ bool IsNaNScalar(const half value) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
return __hisnan(value);
#else
return isnan(float(value));
#endif
}
template <>
__device__ __forceinline__ bool IsFiniteScalar(const BFloat16 value) {
return isfinite(static_cast<float>(value));
}
template <>
__device__ __forceinline__ bool IsInfScalar(const BFloat16 value) {
return isinf(static_cast<float>(value));
}
template <>
__device__ __forceinline__ bool IsNaNScalar(const BFloat16 value) {
return isnan(static_cast<float>(value));
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/cpu/math/clip.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class Clip_6 final : public onnxruntime::clip_internal::Clip_6Base<T>, public RocmKernel {
public:
explicit Clip_6(const OpKernelInfo& info) : onnxruntime::clip_internal::Clip_6Base<T>(info), RocmKernel{info} {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
// Since opset 11, Min and Max are inputs.
// Opset 12 adds support for more types.
class Clip final : public RocmKernel {
public:
explicit Clip(const OpKernelInfo& info) : RocmKernel{info} {
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct ComputeImpl;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cpu/math/clip.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class Clip_6 final : public onnxruntime::clip_internal::Clip_6Base<T>, public CudaKernel {
public:
explicit Clip_6(const OpKernelInfo& info) : onnxruntime::clip_internal::Clip_6Base<T>(info), CudaKernel{info} {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
// Since opset 11, Min and Max are inputs.
// Opset 12 adds support for more types.
class Clip final : public CudaKernel {
public:
explicit Clip(const OpKernelInfo& info) : CudaKernel{info} {
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct ComputeImpl;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename TSrc>
class IsAllFiniteOp final : public RocmKernel {
public:
IsAllFiniteOp(const OpKernelInfo& info) : RocmKernel(info) {
int64_t isinf_only;
info.GetAttrOrDefault("isinf_only", &isinf_only, static_cast<int64_t>(0));
isinf_only_ = (isinf_only != 0);
int64_t isnan_only;
info.GetAttrOrDefault("isnan_only", &isnan_only, static_cast<int64_t>(0));
isnan_only_ = (isnan_only != 0);
ORT_ENFORCE(!(isinf_only_ && isnan_only_),
"Both attributes isinf_only and isnan_only cannot be set. Unset both to check for both conditions.");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool isinf_only_, isnan_only_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename TSrc>
class IsAllFiniteOp final : public CudaKernel {
public:
IsAllFiniteOp(const OpKernelInfo& info) : CudaKernel(info) {
int64_t isinf_only;
info.GetAttrOrDefault("isinf_only", &isinf_only, static_cast<int64_t>(0));
isinf_only_ = (isinf_only != 0);
int64_t isnan_only;
info.GetAttrOrDefault("isnan_only", &isnan_only, static_cast<int64_t>(0));
isnan_only_ = (isnan_only != 0);
ORT_ENFORCE(!(isinf_only_ && isnan_only_),
"Both attributes isinf_only and isnan_only cannot be set. Unset both to check for both conditions.");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool isinf_only_, isnan_only_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_fp16.h>
#include "isfinite_impl.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "contrib_ops/rocm/math/isfinite.cuh"
namespace onnxruntime {
namespace rocm {
template <typename TSrc, bool isinf_only, bool isnan_only>
__global__ void IsAllFiniteMultiTensorImpl(ChunkGroup<1> chunks, bool* output) {
const int block_idx = blockIdx.x;
const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
const TSrc* tensor_ptr = static_cast<TSrc*>(chunks.tensor_ptrs[0][tensor_idx]);
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
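// For example (hypothetical numbers): with tensor_size = 100, chunk_start_idx = 96 and
// chunks.chunk_size = 8, the line below yields chunk_size = min(100, 104) - 96 = 4.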
const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
const TSrc* chunk_ptr = tensor_ptr + chunk_start_idx;
bool result = true;
#pragma unroll 4
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
if (isinf_only) {
result &= !IsInfScalar(chunk_ptr[i]);
} else if (isnan_only) {
result &= !IsNaNScalar(chunk_ptr[i]);
} else {
result &= IsFiniteScalar(chunk_ptr[i]);
}
}
if (!result) {
*output = false;
}
}
template <typename T>
void IsAllFiniteFunctor<T>::operator()(hipStream_t stream,
ChunkGroup<1> chunks,
bool* output,
const bool isinf_only,
const bool isnan_only) {
const int block_count = chunks.chunk_count;
const int thread_count = ChunkGroup<1>::thread_count_per_block;
if (isinf_only) {
IsAllFiniteMultiTensorImpl<T, true, false><<<block_count, thread_count, 0, stream>>>(chunks, output);
} else if (isnan_only) {
IsAllFiniteMultiTensorImpl<T, false, true><<<block_count, thread_count, 0, stream>>>(chunks, output);
} else {
IsAllFiniteMultiTensorImpl<T, false, false><<<block_count, thread_count, 0, stream>>>(chunks, output);
}
}
#define INSTANTIATE_ISALLFINITE_FUNCTOR(T) \
template void IsAllFiniteFunctor<T>::operator()(hipStream_t stream, \
ChunkGroup<1> chunks, \
bool* output, \
const bool isinf_only, \
const bool isnan_only);
INSTANTIATE_ISALLFINITE_FUNCTOR(half)
INSTANTIATE_ISALLFINITE_FUNCTOR(float)
INSTANTIATE_ISALLFINITE_FUNCTOR(double)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_fp16.h>
#include "isfinite_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/math/isfinite.cuh"
namespace onnxruntime {
namespace cuda {
template <typename TSrc, bool isinf_only, bool isnan_only>
__global__ void IsAllFiniteMultiTensorImpl(ChunkGroup<1> chunks, bool* output) {
const int block_idx = blockIdx.x;
const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
const TSrc* tensor_ptr = static_cast<TSrc*>(chunks.tensor_ptrs[0][tensor_idx]);
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
const TSrc* chunk_ptr = tensor_ptr + chunk_start_idx;
bool result = true;
#pragma unroll 4
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
if (isinf_only) {
result &= !IsInfScalar(chunk_ptr[i]);
} else if (isnan_only) {
result &= !IsNaNScalar(chunk_ptr[i]);
} else {
result &= IsFiniteScalar(chunk_ptr[i]);
}
}
if (!result) {
*output = false;
}
}
template <typename T>
void IsAllFiniteFunctor<T>::operator()(cudaStream_t stream,
ChunkGroup<1> chunks,
bool* output,
const bool isinf_only,
const bool isnan_only) {
const int block_count = chunks.chunk_count;
const int thread_count = ChunkGroup<1>::thread_count_per_block;
if (isinf_only) {
IsAllFiniteMultiTensorImpl<T, true, false><<<block_count, thread_count, 0, stream>>>(chunks, output);
} else if (isnan_only) {
IsAllFiniteMultiTensorImpl<T, false, true><<<block_count, thread_count, 0, stream>>>(chunks, output);
} else {
IsAllFiniteMultiTensorImpl<T, false, false><<<block_count, thread_count, 0, stream>>>(chunks, output);
}
}
#define INSTANTIATE_ISALLFINITE_FUNCTOR(T) \
template void IsAllFiniteFunctor<T>::operator()(cudaStream_t stream, \
ChunkGroup<1> chunks, \
bool* output, \
const bool isinf_only, \
const bool isnan_only);
INSTANTIATE_ISALLFINITE_FUNCTOR(half)
INSTANTIATE_ISALLFINITE_FUNCTOR(float)
INSTANTIATE_ISALLFINITE_FUNCTOR(double)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
#include "core/providers/rocm/multi_tensor/common.cuh"
namespace onnxruntime {
namespace rocm {
template <typename T>
struct IsAllFiniteFunctor {
void operator()(hipStream_t stream, ChunkGroup<1> chunks, bool* output, const bool isinf_only, const bool isnan_only);
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
#include "core/providers/cuda/multi_tensor/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
struct IsAllFiniteFunctor {
void operator()(cudaStream_t stream, ChunkGroup<1> chunks, bool* output, const bool isinf_only, const bool isnan_only);
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_OPS
// The shrunken_gather include should be removed from the ENABLE_TRAINING_OPS guard once
// 1) the compute optimizer is enabled for inference, or 2) it is needed by inference for other purposes.
#include "contrib_ops/rocm/tensor/shrunken_gather.h"
#include "contrib_ops/cpu/tensor/shrunken_gather.h"
namespace onnxruntime {
namespace contrib {
namespace rocm {
using namespace onnxruntime::rocm;
ONNX_OPERATOR_KERNEL_EX(
ShrunkenGather,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(),
DataTypeImpl::GetTensorType<int64_t>()}),
ShrunkenGather);
Status ShrunkenGather::ComputeInternal(OpKernelContext* context) const {
Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));
ShrunkenGatherCommon::CheckInput(p.input_tensor, p.indices_tensor, p.axis);
return onnxruntime::rocm::Gather::ComputeInternal(context);
}
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_OPS
// The shrunken_gather include should be removed from the ENABLE_TRAINING_OPS guard once
// 1) the compute optimizer is enabled for inference, or 2) it is needed by inference for other purposes.
#include "contrib_ops/cuda/tensor/shrunken_gather.h"
#include "contrib_ops/cpu/tensor/shrunken_gather.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
ONNX_OPERATOR_KERNEL_EX(
ShrunkenGather,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllTensorTypes())
.TypeConstraint("Tind", std::vector<MLDataType>{
DataTypeImpl::GetTensorType<int32_t>(),
DataTypeImpl::GetTensorType<int64_t>()}),
ShrunkenGather);
Status ShrunkenGather::ComputeInternal(OpKernelContext* context) const {
Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(context, p));
ShrunkenGatherCommon::CheckInput(p.input_tensor, p.indices_tensor, p.axis);
return onnxruntime::cuda::Gather::ComputeInternal(context);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_OPS
// The shrunken_gather include should be removed from the ENABLE_TRAINING_OPS guard once
// 1) the compute optimizer is enabled for inference, or 2) it is needed by inference for other purposes.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/rocm/tensor/gather.h"
#include "contrib_ops/cpu/tensor/shrunken_gather.h"
namespace onnxruntime {
namespace contrib {
namespace rocm {
class ShrunkenGather final : public onnxruntime::rocm::Gather, public ShrunkenGatherCommon {
public:
ShrunkenGather(const OpKernelInfo& info) : onnxruntime::rocm::Gather(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef ENABLE_TRAINING_OPS
// The shrunken_gather include should be removed from the ENABLE_TRAINING_OPS guard once
// 1) the compute optimizer is enabled for inference, or 2) it is needed by inference for other purposes.
#include "core/providers/shared_library/provider_api.h"
#include "core/providers/cuda/tensor/gather.h"
#include "contrib_ops/cpu/tensor/shrunken_gather.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
class ShrunkenGather final : public onnxruntime::cuda::Gather, public ShrunkenGatherCommon {
public:
ShrunkenGather(const OpKernelInfo& info) : onnxruntime::cuda::Gather(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/tensor/trilu.h"
namespace onnxruntime {
namespace contrib {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
Trilu,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 1)
.MayInplace(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
onnxruntime::rocm::Trilu);
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/tensor/trilu.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
Trilu,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 1)
.MayInplace(0, 0)
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes()),
onnxruntime::cuda::Trilu);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "contrib_ops/cpu/transformers/beam_search.h"
namespace onnxruntime {
class SessionState;
namespace contrib {
namespace rocm {
class BeamSearch final : public onnxruntime::contrib::transformers::BeamSearch {
public:
BeamSearch(const OpKernelInfo& info);
Status Compute(OpKernelContext* context) const override;
private:
Status ComputeInternal(OpKernelContext* context) const;
};
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "contrib_ops/cpu/transformers/beam_search.h"
namespace onnxruntime {
class SessionState;
namespace contrib {
namespace cuda {
class BeamSearch final : public onnxruntime::contrib::transformers::BeamSearch {
public:
BeamSearch(const OpKernelInfo& info);
Status Compute(OpKernelContext* context) const override;
private:
Status ComputeInternal(OpKernelContext* context) const;
};
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace contrib {
namespace rocm {
template <typename T>
void BeamSearchTopK(
const T* input,
int32_t batch_size,
int32_t num_beams,
int32_t vocab_size,
int32_t k,
T* tmp_values_1st_stage,
int32_t* tmp_indices_1st_stage,
T* tmp_values_2nd_stage,
int32_t* tmp_indices_2nd_stage,
T* output_values,
int32_t* output_tokens,
int32_t* output_indices,
hipStream_t stream);
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <cuda_runtime.h>
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T>
void BeamSearchTopK(
const T* input,
int32_t batch_size,
int32_t num_beams,
int32_t vocab_size,
int32_t k,
T* tmp_values_1st_stage,
int32_t* tmp_indices_1st_stage,
T* tmp_values_2nd_stage,
int32_t* tmp_indices_2nd_stage,
T* output_values,
int32_t* output_tokens,
int32_t* output_indices,
cudaStream_t stream);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/framework/tensorprotoutils.h"
#include "core/framework/ort_value.h"
#include "contrib_ops/cpu/utils/console_dumper.h"
namespace onnxruntime {
namespace contrib {
namespace rocm {
namespace transformers {
class HipTensorConsoleDumper : public onnxruntime::contrib::transformers::IConsoleDumper {
public:
HipTensorConsoleDumper() = default;
virtual ~HipTensorConsoleDumper() {}
void Print(const char* name, const float* tensor, int dim0, int dim1) const override;
void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1) const override;
void Print(const char* name, const size_t* tensor, int dim0, int dim1) const override;
void Print(const char* name, const half* tensor, int dim0, int dim1) const;
void Print(const char* name, const int64_t* tensor, int dim0, int dim1) const override;
void Print(const char* name, const int32_t* tensor, int dim0, int dim1) const override;
void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2, int dim3) const;
void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2, int dim3) const;
void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2) const;
void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2, int dim3) const;
void Print(const char* name, const int64_t* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const int32_t* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const Tensor& value) const override;
void Print(const char* name, const OrtValue& value) const override;
void Print(const char* name, int index, bool end_line) const override;
void Print(const char* name, const std::string& value, bool end_line) const override;
};
} // namespace transformers
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/framework/tensorprotoutils.h"
#include "core/framework/ort_value.h"
#include "contrib_ops/cpu/utils/console_dumper.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
namespace transformers {
class CudaTensorConsoleDumper : public onnxruntime::contrib::transformers::IConsoleDumper {
public:
CudaTensorConsoleDumper() = default;
virtual ~CudaTensorConsoleDumper() {}
void Print(const char* name, const float* tensor, int dim0, int dim1) const override;
void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1) const override;
void Print(const char* name, const size_t* tensor, int dim0, int dim1) const override;
void Print(const char* name, const half* tensor, int dim0, int dim1) const;
void Print(const char* name, const int64_t* tensor, int dim0, int dim1) const override;
void Print(const char* name, const int32_t* tensor, int dim0, int dim1) const override;
void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const float* tensor, int dim0, int dim1, int dim2, int dim3) const;
void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const MLFloat16* tensor, int dim0, int dim1, int dim2, int dim3) const;
void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2) const;
void Print(const char* name, const half* tensor, int dim0, int dim1, int dim2, int dim3) const;
void Print(const char* name, const int64_t* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const int32_t* tensor, int dim0, int dim1, int dim2) const override;
void Print(const char* name, const Tensor& value) const override;
void Print(const char* name, const OrtValue& value) const override;
void Print(const char* name, int index, bool end_line) const override;
void Print(const char* name, const std::string& value, bool end_line) const override;
};
} // namespace transformers
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
#pragma once
#include <stdint.h>
#include <hip/hip_fp16.h>
#include <hiprand/hiprand_kernel.h>
namespace onnxruntime {
namespace contrib {
namespace rocm {
void LaunchInitKernel(
float* beam_scores, int batch_size, int num_beams, hipStream_t stream);
template <typename T>
void LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, hipStream_t stream);
template <typename T>
void LaunchLogitsProcessKernel(
T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, hipStream_t stream);
void LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, hipStream_t stream);
void LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, hipStream_t stream);
template <typename T>
void GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, hipStream_t stream, bool is_descending, size_t& temp_storage_bytes);
void LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, hipStream_t stream);
template <typename T>
void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, hipStream_t stream, bool is_descending);
template <typename T>
void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, hipStream_t stream, bool is_descending);
void TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* d_presence_mask, hipStream_t stream);
void UpdateDecoderMaskedMultiHeadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, hipStream_t stream);
template <typename T>
void KeyCacheExpansionKernelLauncher(const T* key_cache, T* key_cache_expanded, int batch_size, int beam_width, int num_heads, int sequence_length, int max_seq_length, int head_size, hipStream_t stream);
template <typename T>
void BufferExpansionKernelLauncher(const T* input, T* output, int batch_size, int beam_width, int chunk_size, hipStream_t stream);
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
#pragma once
#include <stdint.h>
#include <cuda_fp16.h>
#include <curand_kernel.h>
namespace onnxruntime {
namespace contrib {
namespace cuda {
void LaunchInitKernel(
float* beam_scores, int batch_size, int num_beams, cudaStream_t stream);
template <typename T>
void LaunchAddProbsKernel(T* log_probs, T* cum_log_probs, const int batch_size, const int num_beams, const int vocab_size, cudaStream_t stream);
template <typename T>
void LaunchLogitsProcessKernel(
T* next_token_scores, const int* vocab_mask, const int* prefix_vocab_mask, int* presence_mask, float presence_penalty, float temperature, int batch_size, int num_beams, int vocab_size, int padded_vocab_size, int demote_token_id, int32_t* sequences, int max_sequence_length, int current_sequence_length, float repetition_penalty, int no_repeat_ngram_size, cudaStream_t stream);
void LaunchNextTokenKernel(const int64_t* next_token_indices, int32_t* next_indices, int32_t* next_tokens, int batch_size, int top_k, int vocab_size, cudaStream_t stream);
void LaunchUpdateGptKernel(const int32_t* old_mask_data, int32_t* mask_data, int32_t* next_positions, int batch_beam_size, int current_length, cudaStream_t stream);
template <typename T>
void GetTempStorageSize(const T* d_keys_in, const int* d_values_in, int* d_offsets, int num_items, int num_segments, cudaStream_t stream, bool is_descending, size_t& temp_storage_bytes);
void LaunchSetupParamsKernel(int* d_values_in, int* d_offsets, int batch_size, int vocab_size, cudaStream_t stream);
template <typename T>
void LaunchSortPairs(void* d_temp_storage, size_t temp_storage_bytes, const T* d_keys_in, T* d_keys_out, const int* d_values_in, int* d_values_out, int num_items, int num_segments, int* d_offsets, cudaStream_t stream, bool is_descending);
template <typename T>
void LaunchFilterLogitsKernel(float* d_sorted_logits_in, const int* d_sorted_indices, T* d_logits_in_out, float top_p, float filter_value, int min_tokens_to_keep, int batch_size, int vocab_size, cudaStream_t stream, bool is_descending);
void TorchMultinomialKernelLauncher(float* d_input, float* d_sampled, int32_t* d_output, int batch_size, int vocab_size, int* d_presence_mask, cudaStream_t stream);
void UpdateDecoderMaskedMultiHeadAttentionCacheIndirection(int32_t* tgt_indir_cache, const int32_t* src_indir_cache, const int32_t* beam_ids, int batch_size, int beam_width, int input_seq_length, int max_seq_length, int current_length, cudaStream_t stream);
template <typename T>
void KeyCacheExpansionKernelLauncher(const T* key_cache, T* key_cache_expanded, int batch_size, int beam_width, int num_heads, int sequence_length, int max_seq_length, int head_size, cudaStream_t stream);
template <typename T>
void BufferExpansionKernelLauncher(const T* input, T* output, int batch_size, int beam_width, int chunk_size, cudaStream_t stream);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
#include "core/providers/rocm/math/clip_impl.h"
#include "core/providers/rocm/cu_inc/common.cuh"
namespace onnxruntime {
namespace rocm {
template <typename T>
__global__ void _Clip(const T* input, T* output, const T* min, const T* max, T min_default, T max_default, size_t N) {
auto min_val = (min) ? *min : min_default;
auto max_val = (max) ? *max : max_default;
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output[id] = (input[id] < min_val) ? min_val : ((input[id] > max_val) ? max_val : input[id]);
}
template <typename T>
void ClipImpl(hipStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count) {
typedef typename ToHipType<T>::MappedType HipT;
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
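// The unions below reinterpret T* as the device-side HipT* (e.g. MLFloat16 -> half) without an
// explicit reinterpret_cast, so the single kernel launch works for every instantiated T.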
union ConstAliasUnion {
const T *t;
const HipT *rocmT;
ConstAliasUnion(const T* _t) { t = _t;}
};
union AliasUnion {
T *t;
HipT *rocmT;
AliasUnion(T* _t) { t = _t;}
};
_Clip<HipT><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
    ((union ConstAliasUnion)input_data).rocmT,
    ((union AliasUnion)output_data).rocmT,
    ((union ConstAliasUnion)min).rocmT,
    ((union ConstAliasUnion)max).rocmT,
    *((union AliasUnion)&min_default).rocmT,
    *((union AliasUnion)&max_default).rocmT,
    count);
}
template void ClipImpl<float>(hipStream_t stream, const float* input_data, float* output_data, const float* min, const float* max, float min_default, float max_default, size_t count);
template void ClipImpl<double>(hipStream_t stream, const double* input_data, double* output_data, const double* min, const double* max, double min_default, double max_default, size_t count);
template void ClipImpl<MLFloat16>(hipStream_t stream, const MLFloat16* input_data, MLFloat16* output_data, const MLFloat16* min, const MLFloat16* max, MLFloat16 min_default, MLFloat16 max_default, size_t count);
template void ClipImpl<int8_t>(hipStream_t stream, const int8_t* input_data, int8_t* output_data, const int8_t* min, const int8_t* max, int8_t min_default, int8_t max_default, size_t count);
template void ClipImpl<uint8_t>(hipStream_t stream, const uint8_t* input_data, uint8_t* output_data, const uint8_t* min, const uint8_t* max, uint8_t min_default, uint8_t max_default, size_t count);
template void ClipImpl<int64_t>(hipStream_t stream, const int64_t* input_data, int64_t* output_data, const int64_t* min, const int64_t* max, int64_t min_default, int64_t max_default, size_t count);
template void ClipImpl<uint64_t>(hipStream_t stream, const uint64_t* input_data, uint64_t* output_data, const uint64_t* min, const uint64_t* max, uint64_t min_default, uint64_t max_default, size_t count);
} // namespace rocm
} // namespace onnxruntime
### |
#include "core/providers/cuda/math/clip_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _Clip(const T* input, T* output, const T* min, const T* max, T min_default, T max_default, size_t N) {
auto min_val = (min) ? *min : min_default;
auto max_val = (max) ? *max : max_default;
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output[id] = (input[id] < min_val) ? min_val : ((input[id] > max_val) ? max_val : input[id]);
}
template <typename T>
void ClipImpl(cudaStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count) {
typedef typename ToCudaType<T>::MappedType CudaT;
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
union ConstAliasUnion {
const T *t;
const CudaT *cudaT;
ConstAliasUnion(const T* _t) { t = _t;}
};
union AliasUnion {
T *t;
CudaT *cudaT;
AliasUnion(T* _t) { t = _t;}
};
_Clip<CudaT><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
    ((union ConstAliasUnion)input_data).cudaT,
    ((union AliasUnion)output_data).cudaT,
    ((union ConstAliasUnion)min).cudaT,
    ((union ConstAliasUnion)max).cudaT,
    *((union AliasUnion)&min_default).cudaT,
    *((union AliasUnion)&max_default).cudaT,
    count);
}
template void ClipImpl<float>(cudaStream_t stream, const float* input_data, float* output_data, const float* min, const float* max, float min_default, float max_default, size_t count);
template void ClipImpl<double>(cudaStream_t stream, const double* input_data, double* output_data, const double* min, const double* max, double min_default, double max_default, size_t count);
template void ClipImpl<MLFloat16>(cudaStream_t stream, const MLFloat16* input_data, MLFloat16* output_data, const MLFloat16* min, const MLFloat16* max, MLFloat16 min_default, MLFloat16 max_default, size_t count);
template void ClipImpl<int8_t>(cudaStream_t stream, const int8_t* input_data, int8_t* output_data, const int8_t* min, const int8_t* max, int8_t min_default, int8_t max_default, size_t count);
template void ClipImpl<uint8_t>(cudaStream_t stream, const uint8_t* input_data, uint8_t* output_data, const uint8_t* min, const uint8_t* max, uint8_t min_default, uint8_t max_default, size_t count);
template void ClipImpl<int64_t>(cudaStream_t stream, const int64_t* input_data, int64_t* output_data, const int64_t* min, const int64_t* max, int64_t min_default, int64_t max_default, size_t count);
template void ClipImpl<uint64_t>(cudaStream_t stream, const uint64_t* input_data, uint64_t* output_data, const uint64_t* min, const uint64_t* max, uint64_t min_default, uint64_t max_default, size_t count);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace contrib {
namespace rocm {
template <typename T>
void GreedySearchTopOne(
const T* input,
int32_t batch_size,
int32_t vocab_size,
T* tmp_values,
int32_t* tmp_tokens,
T* output_values,
int32_t* output_tokens,
hipStream_t stream);
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <cuda_runtime.h>
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T>
void GreedySearchTopOne(
const T* input,
int32_t batch_size,
int32_t vocab_size,
T* tmp_values,
int32_t* tmp_tokens,
T* output_values,
int32_t* output_tokens,
cudaStream_t stream);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/rocm_execution_provider.h"
#include "contrib_ops/rocm/transformers/sampling.h"
#include "contrib_ops/rocm/transformers/generation_device_helper.h"
#include "contrib_ops/rocm/transformers/dump_rocm_tensor.h"
namespace onnxruntime {
namespace contrib {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
Sampling,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0) // 'input_ids' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 1) // 'max_length' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 2) // 'min_length' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 3) // 'repetition_penalty' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 6) // 'custom_attention_mask' needs to be on CPU
.OutputMemoryType(OrtMemTypeCPUOutput, 0) // 'sequences' output on CPU
.OutputMemoryType(OrtMemTypeCPUOutput, 1) // 'logits_to_debug' output on CPU
.TypeConstraint("T", {DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<MLFloat16>()}),
Sampling);
transformers::HipTensorConsoleDumper g_rocm_dumper_sampling;
Sampling::Sampling(const OpKernelInfo& info)
: onnxruntime::contrib::transformers::Sampling(info) {
SetDeviceHelpers(GenerationCudaDeviceHelper::ReorderPastState,
GenerationCudaDeviceHelper::AddToFeeds,
GenerationCudaDeviceHelper::TopK,
GenerationCudaDeviceHelper::DeviceCopy<float>,
GenerationCudaDeviceHelper::GreedySearchProcessLogits<float>,
GenerationCudaDeviceHelper::GreedySearchProcessLogits<MLFloat16>,
GenerationCudaDeviceHelper::InitGreedyState<float>,
GenerationCudaDeviceHelper::InitGreedyState<MLFloat16>);
SetDeviceHelpers_Gpt(GenerationCudaDeviceHelper::UpdateGptFeeds<float>,
GenerationCudaDeviceHelper::UpdateGptFeeds<MLFloat16>);
SetConsoleDumper(&g_rocm_dumper_sampling);
gpu_device_prop_ = &reinterpret_cast<const ROCMExecutionProvider*>(info.GetExecutionProvider())->GetDeviceProp();
gpu_device_arch_ = static_cast<const hipDeviceProp_t*>(gpu_device_prop_)->major * 100 +
static_cast<const hipDeviceProp_t*>(gpu_device_prop_)->minor * 10;
}
Status Sampling::ComputeInternal(OpKernelContext* context) const {
return onnxruntime::contrib::transformers::Sampling::Compute(context);
}
Status Sampling::Compute(OpKernelContext* context) const {
auto s = ComputeInternal(context);
if (s.IsOK()) {
auto err = hipGetLastError();
if (err != hipSuccess) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "ROCM error ", hipGetErrorName(err), ":", hipGetErrorString(err));
}
}
return s;
}
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cuda_execution_provider.h"
#include "contrib_ops/cuda/transformers/sampling.h"
#include "contrib_ops/cuda/transformers/generation_device_helper.h"
#include "contrib_ops/cuda/transformers/dump_cuda_tensor.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
Sampling,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0) // 'input_ids' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 1) // 'max_length' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 2) // 'min_length' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 3) // 'repetition_penalty' needs to be on CPU
.InputMemoryType(OrtMemTypeCPUInput, 6) // 'custom_attention_mask' needs to be on CPU
.OutputMemoryType(OrtMemTypeCPUOutput, 0) // 'sequences' output on CPU
.OutputMemoryType(OrtMemTypeCPUOutput, 1) // 'logits_to_debug' output on CPU
.TypeConstraint("T", {DataTypeImpl::GetTensorType<float>(),
DataTypeImpl::GetTensorType<MLFloat16>()}),
Sampling);
transformers::CudaTensorConsoleDumper g_cuda_dumper_sampling;
Sampling::Sampling(const OpKernelInfo& info)
: onnxruntime::contrib::transformers::Sampling(info) {
SetDeviceHelpers(GenerationCudaDeviceHelper::ReorderPastState,
GenerationCudaDeviceHelper::AddToFeeds,
GenerationCudaDeviceHelper::TopK,
GenerationCudaDeviceHelper::DeviceCopy<float>,
GenerationCudaDeviceHelper::GreedySearchProcessLogits<float>,
GenerationCudaDeviceHelper::GreedySearchProcessLogits<MLFloat16>,
GenerationCudaDeviceHelper::InitGreedyState<float>,
GenerationCudaDeviceHelper::InitGreedyState<MLFloat16>);
SetDeviceHelpers_Gpt(GenerationCudaDeviceHelper::UpdateGptFeeds<float>,
GenerationCudaDeviceHelper::UpdateGptFeeds<MLFloat16>);
SetConsoleDumper(&g_cuda_dumper_sampling);
gpu_device_prop_ = &reinterpret_cast<const CUDAExecutionProvider*>(info.GetExecutionProvider())->GetDeviceProp();
gpu_device_arch_ = static_cast<const cudaDeviceProp*>(gpu_device_prop_)->major * 100 +
static_cast<const cudaDeviceProp*>(gpu_device_prop_)->minor * 10;
}
Status Sampling::ComputeInternal(OpKernelContext* context) const {
return onnxruntime::contrib::transformers::Sampling::Compute(context);
}
Status Sampling::Compute(OpKernelContext* context) const {
auto s = ComputeInternal(context);
if (s.IsOK()) {
auto err = cudaGetLastError();
if (err != cudaSuccess) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "CUDA error ", cudaGetErrorName(err), ":", cudaGetErrorString(err));
}
}
return s;
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "contrib_ops/cpu/transformers/sampling.h"
namespace onnxruntime {
class SessionState;
namespace contrib {
namespace rocm {
class Sampling final : public onnxruntime::contrib::transformers::Sampling {
public:
Sampling(const OpKernelInfo& info);
Status Compute(OpKernelContext* context) const override;
private:
Status ComputeInternal(OpKernelContext* context) const;
};
} // namespace rocm
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "contrib_ops/cpu/transformers/sampling.h"
namespace onnxruntime {
class SessionState;
namespace contrib {
namespace cuda {
class Sampling final : public onnxruntime::contrib::transformers::Sampling {
public:
Sampling(const OpKernelInfo& info);
Status Compute(OpKernelContext* context) const override;
private:
Status ComputeInternal(OpKernelContext* context) const;
};
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/activation/activations_grad.h"
#include "core/framework/op_kernel.h"
namespace onnxruntime {
namespace rocm {
#define REGISTER_ACTIVATION_GRAD_KERNEL(x, ver, domain, T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
x, \
domain, \
ver, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.MayInplace(0, 0), \
x<T>);
#define BINARY_ELEMENTWISE_COMPUTE(x, T) \
template <> \
Status x<T>::ComputeInternal(OpKernelContext* context) const { \
BinaryElementwisePreparation prepare; \
ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); \
Ctx##x func_ctx = MakeFuncCtx(); \
Impl_##x<typename ToHipType<T>::MappedType>( \
Stream(context), \
reinterpret_cast<const typename ToHipType<T>::MappedType*>(prepare.lhs_tensor->template Data<T>()), \
reinterpret_cast<const typename ToHipType<T>::MappedType*>(prepare.rhs_tensor->template Data<T>()), \
reinterpret_cast<typename ToHipType<T>::MappedType*>(prepare.output_tensor->template MutableData<T>()), \
&func_ctx, prepare.output_tensor->Shape().Size()); \
return Status::OK(); \
}
#define ACTIVATION_GRAD_OP_TYPED(name, ver, domain, T) \
REGISTER_ACTIVATION_GRAD_KERNEL(name, ver, domain, T) \
BINARY_ELEMENTWISE_COMPUTE(name, T)
#define ACTIVATION_GRAD_OP_HFD(name, ver, domain) \
ACTIVATION_GRAD_OP_TYPED(name, ver, domain, MLFloat16) \
ACTIVATION_GRAD_OP_TYPED(name, ver, domain, float) \
ACTIVATION_GRAD_OP_TYPED(name, ver, domain, double)
ACTIVATION_GRAD_OP_HFD(GeluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(FastGeluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(ReluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(SigmoidGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(QuickGeluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(TanhGrad, 1, kMSDomain);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/activation/activations_grad.h"
#include "core/framework/op_kernel.h"
namespace onnxruntime {
namespace cuda {
#define REGISTER_ACTIVATION_GRAD_KERNEL(x, ver, domain, T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
x, \
domain, \
ver, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.MayInplace(0, 0), \
x<T>);
#define BINARY_ELEMENTWISE_COMPUTE(x, T) \
template <> \
Status x<T>::ComputeInternal(OpKernelContext* context) const { \
BinaryElementwisePreparation prepare; \
ORT_RETURN_IF_ERROR(Prepare(context, &prepare)); \
Ctx##x func_ctx = MakeFuncCtx(); \
Impl_##x<typename ToCudaType<T>::MappedType>( \
Stream(context), \
reinterpret_cast<const typename ToCudaType<T>::MappedType*>(prepare.lhs_tensor->template Data<T>()), \
reinterpret_cast<const typename ToCudaType<T>::MappedType*>(prepare.rhs_tensor->template Data<T>()), \
reinterpret_cast<typename ToCudaType<T>::MappedType*>(prepare.output_tensor->template MutableData<T>()), \
&func_ctx, prepare.output_tensor->Shape().Size()); \
return Status::OK(); \
}
#define ACTIVATION_GRAD_OP_TYPED(name, ver, domain, T) \
REGISTER_ACTIVATION_GRAD_KERNEL(name, ver, domain, T) \
BINARY_ELEMENTWISE_COMPUTE(name, T)
#define ACTIVATION_GRAD_OP_HFD(name, ver, domain) \
ACTIVATION_GRAD_OP_TYPED(name, ver, domain, MLFloat16) \
ACTIVATION_GRAD_OP_TYPED(name, ver, domain, float) \
ACTIVATION_GRAD_OP_TYPED(name, ver, domain, double)
ACTIVATION_GRAD_OP_HFD(GeluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(FastGeluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(ReluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(SigmoidGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(QuickGeluGrad, 1, kMSDomain);
ACTIVATION_GRAD_OP_HFD(TanhGrad, 1, kMSDomain);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/math/binary_elementwise_ops.h"
#include "core/providers/rocm/activation/activations.h"
#include "orttraining/training_ops/rocm/activation/activations_grad_impl.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class GeluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
GeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class FastGeluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
FastGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class ReluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
ReluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class SigmoidGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
SigmoidGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class QuickGeluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
QuickGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {
alpha_ = info.GetAttrOrDefault<float>("alpha", 1.702f);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_ALPHA()
float alpha_;
};
template <typename T>
class TanhGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
TanhGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/math/binary_elementwise_ops.h"
#include "core/providers/cuda/activation/activations.h"
#include "orttraining/training_ops/cuda/activation/activations_grad_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class GeluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
GeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class FastGeluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
FastGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class ReluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
ReluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class SigmoidGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
SigmoidGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
template <typename T>
class QuickGeluGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
QuickGeluGrad(const OpKernelInfo& info) : BinaryElementwise(info) {
alpha_ = info.GetAttrOrDefault<float>("alpha", 1.702f);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_ALPHA()
float alpha_;
};
template <typename T>
class TanhGrad final : public BinaryElementwise<ShouldNotBroadcast> {
public:
TanhGrad(const OpKernelInfo& info) : BinaryElementwise(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
MAKE_FUNC_CTX_NULL()
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/activation/activations_impl.h"
namespace onnxruntime {
namespace rocm {
typedef onnxruntime::rocm::CtxNull CtxGeluGrad;
typedef onnxruntime::rocm::CtxNull CtxFastGeluGrad;
typedef onnxruntime::rocm::CtxNull CtxReluGrad;
typedef onnxruntime::rocm::CtxNull CtxSigmoidGrad;
typedef onnxruntime::rocm::CtxAlpha CtxQuickGeluGrad;
typedef onnxruntime::rocm::CtxNull CtxTanhGrad;
#define ACTIVATION_GRAD_OPS() \
ACTIVATION_GRAD_OP_NAME(GeluGrad) \
ACTIVATION_GRAD_OP_NAME(FastGeluGrad) \
ACTIVATION_GRAD_OP_NAME(ReluGrad) \
ACTIVATION_GRAD_OP_NAME(SigmoidGrad) \
ACTIVATION_GRAD_OP_NAME(QuickGeluGrad) \
ACTIVATION_GRAD_OP_NAME(TanhGrad)
#define BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \
template <typename T> \
void Impl_##name(hipStream_t stream, \
const T* lhs_data, \
const T* rhs_data, \
T* output_data, \
const Ctx##name* func_ctx, \
size_t count)
#define ACTIVATION_GRAD_OP_NAME(name) BINARY_ELEMENTWISE_IMPL_DECLARATION(name);
ACTIVATION_GRAD_OPS()
#undef ACTIVATION_GRAD_OP_NAME
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/activation/activations_impl.h"
namespace onnxruntime {
namespace cuda {
typedef onnxruntime::cuda::CtxNull CtxGeluGrad;
typedef onnxruntime::cuda::CtxNull CtxFastGeluGrad;
typedef onnxruntime::cuda::CtxNull CtxReluGrad;
typedef onnxruntime::cuda::CtxNull CtxSigmoidGrad;
typedef onnxruntime::cuda::CtxAlpha CtxQuickGeluGrad;
typedef onnxruntime::cuda::CtxNull CtxTanhGrad;
#define ACTIVATION_GRAD_OPS() \
ACTIVATION_GRAD_OP_NAME(GeluGrad) \
ACTIVATION_GRAD_OP_NAME(FastGeluGrad) \
ACTIVATION_GRAD_OP_NAME(ReluGrad) \
ACTIVATION_GRAD_OP_NAME(SigmoidGrad) \
ACTIVATION_GRAD_OP_NAME(QuickGeluGrad) \
ACTIVATION_GRAD_OP_NAME(TanhGrad)
#define BINARY_ELEMENTWISE_IMPL_DECLARATION(name) \
template <typename T> \
void Impl_##name(cudaStream_t stream, \
const T* lhs_data, \
const T* rhs_data, \
T* output_data, \
const Ctx##name* func_ctx, \
size_t count)
#define ACTIVATION_GRAD_OP_NAME(name) BINARY_ELEMENTWISE_IMPL_DECLARATION(name);
ACTIVATION_GRAD_OPS()
#undef ACTIVATION_GRAD_OP_NAME
} // namespace cuda
} // namespace onnxruntime
### |
#include "orttraining/training_ops/rocm/activation/bias_gelu_grad.h"
#include "core/common/common.h"
#include "orttraining/training_ops/cpu/activation/gelu_computation_mode.h"
#include "orttraining/training_ops/rocm/activation/bias_gelu_grad_impl.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
BiasGeluGrad_dX, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.MayInplace(0, 0), BiasGeluGrad_dX<gelu_computation_mode::Default>);
ONNX_OPERATOR_KERNEL_EX(
BiasFastGeluGrad_dX, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.MayInplace(0, 0), BiasGeluGrad_dX<gelu_computation_mode::Approximation>);
template <typename GeluComputationMode>
template <typename T>
void BiasGeluGrad_dX<GeluComputationMode>::KernelLaunchDispatcher<T>::operator()(
hipStream_t stream, int64_t input_size, int64_t bias_size, const Tensor& dY, const Tensor& X, const Tensor& B, Tensor& dX) const {
using HipT = typename ToHipType<T>::MappedType;
LaunchBiasGeluGradDxKernel<HipT, GeluComputationMode>(
stream, input_size, bias_size, reinterpret_cast<const HipT*>(dY.template Data<T>()), reinterpret_cast<const HipT*>(X.template Data<T>()), reinterpret_cast<const HipT*>(B.template Data<T>()), reinterpret_cast<HipT*>(dX.template MutableData<T>()));
}
template <typename GeluComputationMode>
Status BiasGeluGrad_dX<GeluComputationMode>::ComputeInternal(OpKernelContext* context) const {
const auto* dY = context->Input<Tensor>(0);
ORT_ENFORCE(dY);
const auto* X = context->Input<Tensor>(1);
ORT_ENFORCE(X);
const auto* B = context->Input<Tensor>(2);
ORT_ENFORCE(B);
const auto& input_shape = X->Shape();
ORT_ENFORCE(input_shape == dY->Shape(), "dY and X must have the same shape.");
const auto& bias_shape = B->Shape();
ORT_ENFORCE(
input_shape.NumDimensions() >= 1 && bias_shape.NumDimensions() == 1 &&
input_shape.GetDims().back() == bias_shape.GetDims().back(), "B must be 1-dimensional and match the last dimension of X.");
auto* dX = context->Output(0, input_shape);
ORT_ENFORCE(dX);
const auto input_size = input_shape.Size(), bias_size = bias_shape.Size();
utils::MLTypeCallDispatcher<MLFloat16, float, double, BFloat16> dispatcher{X->GetElementType()};
dispatcher.Invoke<KernelLaunchDispatcher>(Stream(context), input_size, bias_size, *dY, *X, *B, *dX);
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
#include "orttraining/training_ops/cuda/activation/bias_gelu_grad.h"
#include "core/common/common.h"
#include "orttraining/training_ops/cpu/activation/gelu_computation_mode.h"
#include "orttraining/training_ops/cuda/activation/bias_gelu_grad_impl.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
BiasGeluGrad_dX, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.MayInplace(0, 0), BiasGeluGrad_dX<gelu_computation_mode::Default>);
ONNX_OPERATOR_KERNEL_EX(
BiasFastGeluGrad_dX, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())
.TypeConstraint("T", BuildKernelDefConstraints<MLFloat16, float, double, BFloat16>())
.MayInplace(0, 0), BiasGeluGrad_dX<gelu_computation_mode::Approximation>);
template <typename GeluComputationMode>
template <typename T>
void BiasGeluGrad_dX<GeluComputationMode>::KernelLaunchDispatcher<T>::operator()(
cudaStream_t stream, int64_t input_size, int64_t bias_size, const Tensor& dY, const Tensor& X, const Tensor& B, Tensor& dX) const {
using CudaT = typename ToCudaType<T>::MappedType;
LaunchBiasGeluGradDxKernel<CudaT, GeluComputationMode>(
stream, input_size, bias_size, reinterpret_cast<const CudaT*>(dY.template Data<T>()), reinterpret_cast<const CudaT*>(X.template Data<T>()), reinterpret_cast<const CudaT*>(B.template Data<T>()), reinterpret_cast<CudaT*>(dX.template MutableData<T>()));
}
template <typename GeluComputationMode>
Status BiasGeluGrad_dX<GeluComputationMode>::ComputeInternal(OpKernelContext* context) const {
const auto* dY = context->Input<Tensor>(0);
ORT_ENFORCE(dY);
const auto* X = context->Input<Tensor>(1);
ORT_ENFORCE(X);
const auto* B = context->Input<Tensor>(2);
ORT_ENFORCE(B);
const auto& input_shape = X->Shape();
ORT_ENFORCE(input_shape == dY->Shape(), "dY and X must have the same shape.");
const auto& bias_shape = B->Shape();
ORT_ENFORCE(
input_shape.NumDimensions() >= 1 && bias_shape.NumDimensions() == 1 &&
input_shape.GetDims().back() == bias_shape.GetDims().back(), "B must be 1-dimensional and match the last dimension of X.");
auto* dX = context->Output(0, input_shape);
ORT_ENFORCE(dX);
const auto input_size = input_shape.Size(), bias_size = bias_shape.Size();
utils::MLTypeCallDispatcher<MLFloat16, float, double, BFloat16> dispatcher{X->GetElementType()};
dispatcher.Invoke<KernelLaunchDispatcher>(Stream(context), input_size, bias_size, *dY, *X, *B, *dX);
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename GeluComputationMode>
class BiasGeluGrad_dX : public RocmKernel {
public:
BiasGeluGrad_dX(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct KernelLaunchDispatcher {
void operator()(
hipStream_t stream,
int64_t input_size, int64_t bias_size,
const Tensor& dY, const Tensor& X, const Tensor& B,
Tensor& dX) const;
};
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename GeluComputationMode>
class BiasGeluGrad_dX : public CudaKernel {
public:
BiasGeluGrad_dX(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
template <typename T>
struct KernelLaunchDispatcher {
void operator()(
cudaStream_t stream,
int64_t input_size, int64_t bias_size,
const Tensor& dY, const Tensor& X, const Tensor& B,
Tensor& dX) const;
};
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
namespace onnxruntime {
namespace rocm {
// assumptions:
// - dY, X, dX have input_size elements
// - B has bias_size elements
// - input_size % bias_size == 0
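// For example, with X of shape [batch, seq, hidden] and B of shape [hidden]:
// input_size = batch * seq * hidden and bias_size = hidden, so B is broadcast
// over every row of the last axis.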
template <typename T, typename GeluComputationMode>
void LaunchBiasGeluGradDxKernel(
hipStream_t stream,
int64_t input_size, int64_t bias_size,
const T* dY, const T* X, const T* B, T* dX);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
namespace onnxruntime {
namespace cuda {
// assumptions:
// - dY, X, dX have input_size elements
// - B has bias_size elements
// - input_size % bias_size == 0
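// For example, with X of shape [batch, seq, hidden] and B of shape [hidden]:
// input_size = batch * seq * hidden and bias_size = hidden, so B is broadcast
// over every row of the last axis.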
template <typename T, typename GeluComputationMode>
void LaunchBiasGeluGradDxKernel(
cudaStream_t stream,
int64_t input_size, int64_t bias_size,
const T* dY, const T* X, const T* B, T* dX);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "orttraining/core/graph/horovod_adapters.h"
#include "orttraining/core/graph/optimizer_config.h"
namespace onnxruntime {
namespace rocm {
class HorovodAllReduce final : public RocmKernel {
public:
HorovodAllReduce(const OpKernelInfo& info) : RocmKernel(info) {
unique_name = "AllReduceNode_" + info.node().Name();
int64_t reduce_op;
// bugbug
int64_t adasum_type = training::AdasumReductionType::None;
info.GetAttrOrDefault("reduce_op", &reduce_op, static_cast<int64_t>(hvd::ReduceOp::SUM));
info.GetAttrOrDefault("reduce_algo", &adasum_type, static_cast<int64_t>(training::AdasumReductionType::None));
reduce_op_ = GetReduceOp(reduce_op);
adasum_type_ = static_cast<training::AdasumReductionType>(adasum_type);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
std::string unique_name;
hvd::ReduceOp reduce_op_;
training::AdasumReductionType adasum_type_;
};
class HorovodBarrier final : public RocmKernel {
public:
HorovodBarrier(const OpKernelInfo& info) : RocmKernel(info) {
// bugbug
int64_t adasum_type = training::AdasumReductionType::None;
info.GetAttrOrDefault("reduce_algo", &adasum_type, static_cast<int64_t>(training::AdasumReductionType::None));
adasum_type_ = static_cast<training::AdasumReductionType>(adasum_type);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
training::AdasumReductionType adasum_type_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "orttraining/core/graph/horovod_adapters.h"
#include "orttraining/core/graph/optimizer_config.h"
namespace onnxruntime {
namespace cuda {
class HorovodAllReduce final : public CudaKernel {
public:
HorovodAllReduce(const OpKernelInfo& info) : CudaKernel(info) {
unique_name = "AllReduceNode_" + info.node().Name();
int64_t reduce_op;
// bugbug
int64_t adasum_type = training::AdasumReductionType::None;
info.GetAttrOrDefault("reduce_op", &reduce_op, static_cast<int64_t>(hvd::ReduceOp::SUM));
info.GetAttrOrDefault("reduce_algo", &adasum_type, static_cast<int64_t>(training::AdasumReductionType::None));
reduce_op_ = GetReduceOp(reduce_op);
adasum_type_ = static_cast<training::AdasumReductionType>(adasum_type);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
std::string unique_name;
hvd::ReduceOp reduce_op_;
training::AdasumReductionType adasum_type_;
};
class HorovodBarrier final : public CudaKernel {
public:
HorovodBarrier(const OpKernelInfo& info) : CudaKernel(info) {
// bugbug
int64_t adasum_type = training::AdasumReductionType::None;
info.GetAttrOrDefault("reduce_algo", &adasum_type, static_cast<int64_t>(training::AdasumReductionType::None));
adasum_type_ = static_cast<training::AdasumReductionType>(adasum_type);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
training::AdasumReductionType adasum_type_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/math/clip.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void ClipImpl(hipStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/math/clip.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void ClipImpl(cudaStream_t stream, const T* input_data, T* output_data, const T* min, const T* max, T min_default, T max_default, size_t count);
} // namespace cuda
} // namespace onnxruntime
### |
#pragma once
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
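// SetBitmask packs NumUnroll per-thread mask bits into bitmask words of
// kNumBitsPerBitmaskElement bits. Each thread shifts its bits to the position
// given by id within the word (fdm_bits_per_element divides id by the word
// width), the bits are OR-reduced across the threads sharing a word, and the
// thread whose bitmask_shift is 0 writes the final word to mask_data.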
template <int NumUnroll>
__device__ __forceinline__ void SetBitmask(const HIP_LONG id, const HIP_LONG mask_element_count, const fast_divmod fdm_bits_per_element, BitmaskElementType thread_bitmask, BitmaskElementType* mask_data) {
int bitmask_idx, bitmask_shift;
fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);
BitmaskElementType bitmask = (thread_bitmask << bitmask_shift);
#if defined(USE_ROCM) && __CUDA_ARCH__ >= 800
BitmaskElementType thread_mask = __match_any_sync(0xFFFFFFFF, bitmask_idx);
bitmask = __reduce_or_sync(thread_mask, bitmask);
#else
#pragma unroll
for (int stride = kNumBitsPerBitmaskElement / (NumUnroll * 2); stride > 0; stride /= 2) {
bitmask |= WARP_SHFL_DOWN(bitmask, stride);
}
#endif
if (bitmask_shift == 0 && bitmask_idx < mask_element_count) {
mask_data[bitmask_idx] = bitmask;
}
}
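// GetMasks is the read-side counterpart: it extracts the NumUnroll bits that
// start at position id from the packed mask and expands them into booleans.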
template <int NumUnroll>
__device__ __forceinline__ void GetMasks(HIP_LONG id, const fast_divmod fdm_bits_per_element, const BitmaskElementType* mask_data, bool* mask_result) {
int bitmask_idx, bitmask_shift;
fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);
BitmaskElementType shifted_mask = mask_data[bitmask_idx] >> bitmask_shift;
#pragma unroll
for (int i = 0; i < NumUnroll; i++) {
mask_result[i] = (shifted_mask & (1 << i)) != 0;
}
}
} // namespace rocm
} // namespace onnxruntime
### |
#pragma once
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
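// SetBitmask packs NumUnroll per-thread mask bits into bitmask words of
// kNumBitsPerBitmaskElement bits. Each thread shifts its bits to the position
// given by id within the word (fdm_bits_per_element divides id by the word
// width), the bits are OR-reduced across the threads sharing a word, and the
// thread whose bitmask_shift is 0 writes the final word to mask_data.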
template <int NumUnroll>
__device__ __forceinline__ void SetBitmask(const CUDA_LONG id, const CUDA_LONG mask_element_count, const fast_divmod fdm_bits_per_element, BitmaskElementType thread_bitmask, BitmaskElementType* mask_data) {
int bitmask_idx, bitmask_shift;
fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);
BitmaskElementType bitmask = (thread_bitmask << bitmask_shift);
#if defined(USE_CUDA) && __CUDA_ARCH__ >= 800
BitmaskElementType thread_mask = __match_any_sync(0xFFFFFFFF, bitmask_idx);
bitmask = __reduce_or_sync(thread_mask, bitmask);
#else
#pragma unroll
for (int stride = kNumBitsPerBitmaskElement / (NumUnroll * 2); stride > 0; stride /= 2) {
bitmask |= WARP_SHFL_DOWN(bitmask, stride);
}
#endif
if (bitmask_shift == 0 && bitmask_idx < mask_element_count) {
mask_data[bitmask_idx] = bitmask;
}
}
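// GetMasks is the read-side counterpart: it extracts the NumUnroll bits that
// start at position id from the packed mask and expands them into booleans.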
template <int NumUnroll>
__device__ __forceinline__ void GetMasks(CUDA_LONG id, const fast_divmod fdm_bits_per_element, const BitmaskElementType* mask_data, bool* mask_result) {
int bitmask_idx, bitmask_shift;
fdm_bits_per_element.divmod(id, bitmask_idx, bitmask_shift);
BitmaskElementType shifted_mask = mask_data[bitmask_idx] >> bitmask_shift;
#pragma unroll
for (int i = 0; i < NumUnroll; i++) {
mask_result[i] = (shifted_mask & (1 << i)) != 0;
}
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
#include "orttraining/core/framework/distributed_run_context.h"
#if defined(ORT_USE_NCCL)
#include <rccl/rccl.h>
#endif
namespace onnxruntime {
namespace rocm {
#if defined(ORT_USE_NCCL)
#define NCCL_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(NCCL_CALL(expr))
#endif
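// NcclContext owns one communicator per worker group (global, data-parallel,
// node-local, cross-node and horizontal); Comm() returns the communicator for
// the requested WorkerGroupType.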
class NcclContext final {
public:
NcclContext();
~NcclContext();
ncclComm_t Comm(training::WorkerGroupType group_type);
int Rank(training::WorkerGroupType group_type) const {
return training::DistributedRunContext::RankInGroup(group_type);
}
int Size(training::WorkerGroupType group_type) const {
return training::DistributedRunContext::GroupSize(group_type);
}
private:
ncclComm_t global_group_comm_;
ncclComm_t data_group_comm_;
ncclComm_t node_local_comm_;
ncclComm_t cross_node_comm_;
ncclComm_t horizontal_group_comm_;
};
// -----------------------------------------------------------------------
// Base class for NCCL kernels
// -----------------------------------------------------------------------
class NcclKernel : public RocmKernel {
public:
explicit NcclKernel(const OpKernelInfo& info);
protected:
NcclContext* nccl_ = nullptr;
training::WorkerGroupType group_type_;
};
ncclDataType_t GetNcclDataType(onnxruntime::MLDataType type);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "orttraining/core/framework/distributed_run_context.h"
#if defined(ORT_USE_NCCL)
#include <nccl.h>
#endif
namespace onnxruntime {
namespace cuda {
#if defined(ORT_USE_NCCL)
#define NCCL_RETURN_IF_ERROR(expr) ORT_RETURN_IF_ERROR(NCCL_CALL(expr))
#endif
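// NcclContext owns one communicator per worker group (global, data-parallel,
// node-local, cross-node and horizontal); Comm() returns the communicator for
// the requested WorkerGroupType.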
class NcclContext final {
public:
NcclContext();
~NcclContext();
ncclComm_t Comm(training::WorkerGroupType group_type);
int Rank(training::WorkerGroupType group_type) const {
return training::DistributedRunContext::RankInGroup(group_type);
}
int Size(training::WorkerGroupType group_type) const {
return training::DistributedRunContext::GroupSize(group_type);
}
private:
ncclComm_t global_group_comm_;
ncclComm_t data_group_comm_;
ncclComm_t node_local_comm_;
ncclComm_t cross_node_comm_;
ncclComm_t horizontal_group_comm_;
};
// -----------------------------------------------------------------------
// Base class for NCCL kernels
// -----------------------------------------------------------------------
class NcclKernel : public CudaKernel {
public:
explicit NcclKernel(const OpKernelInfo& info);
protected:
NcclContext* nccl_ = nullptr;
training::WorkerGroupType group_type_;
};
ncclDataType_t GetNcclDataType(onnxruntime::MLDataType type);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "nccl_common.h"
namespace onnxruntime {
namespace rocm {
class NcclAllReduce final : public NcclKernel {
public:
explicit NcclAllReduce(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
};
class NcclAllGather final : public NcclKernel {
public:
explicit NcclAllGather(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
};
class NcclReduceScatter final : public NcclKernel {
public:
explicit NcclReduceScatter(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "nccl_common.h"
namespace onnxruntime {
namespace cuda {
class NcclAllReduce final : public NcclKernel {
public:
explicit NcclAllReduce(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
};
class NcclAllGather final : public NcclKernel {
public:
explicit NcclAllGather(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
};
class NcclReduceScatter final : public NcclKernel {
public:
explicit NcclReduceScatter(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#if defined(ORT_USE_NCCL) || defined(USE_MPI)
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class Recv final : public RocmKernel {
public:
Recv(const OpKernelInfo& info) : RocmKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("tag", &tag_).IsOK());
ORT_ENFORCE(info.GetAttrs<int64_t>("element_types", element_types_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
void ReceiveData(
const int num_tensors,
std::vector<Tensor*> received_tensors,
const int src,
const size_t aggregated_aligned_tensor_bytes,
OpKernelContext* context,
IAllocatorUniquePtr<char>& buffer) const;
int64_t tag_;
std::vector<int64_t> element_types_;
};
} // namespace rocm
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#if defined(ORT_USE_NCCL) || defined(USE_MPI)
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class Recv final : public CudaKernel {
public:
Recv(const OpKernelInfo& info) : CudaKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("tag", &tag_).IsOK());
ORT_ENFORCE(info.GetAttrs<int64_t>("element_types", element_types_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
void ReceiveData(
const int num_tensors,
std::vector<Tensor*> received_tensors,
const int src,
const size_t aggregated_aligned_tensor_bytes,
OpKernelContext* context,
IAllocatorUniquePtr<char>& buffer) const;
int64_t tag_;
std::vector<int64_t> element_types_;
};
} // namespace cuda
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#if defined(ORT_USE_NCCL) || defined(USE_MPI)
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class Send final : public RocmKernel {
public:
Send(const OpKernelInfo& info) : RocmKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("tag", &tag_).IsOK());
ORT_ENFORCE(info.GetAttrs<int64_t>("element_types", element_types_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
void SendData(
OpKernelContext* ctx,
const int dst,
const int num_tensors,
size_t aggregated_aligned_tensor_bytes,
std::vector<size_t> tensor_offsets_in_bytes,
std::vector<size_t> tensor_sizes_in_bytes) const;
int64_t tag_;
std::vector<int64_t> element_types_;
};
} // namespace rocm
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#if defined(ORT_USE_NCCL) || defined(USE_MPI)
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class Send final : public CudaKernel {
public:
Send(const OpKernelInfo& info) : CudaKernel(info) {
ORT_ENFORCE(info.GetAttr<int64_t>("tag", &tag_).IsOK());
ORT_ENFORCE(info.GetAttrs<int64_t>("element_types", element_types_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
void SendData(
OpKernelContext* ctx,
const int dst,
const int num_tensors,
size_t aggregated_aligned_tensor_bytes,
std::vector<size_t> tensor_offsets_in_bytes,
std::vector<size_t> tensor_sizes_in_bytes) const;
int64_t tag_;
std::vector<int64_t> element_types_;
};
} // namespace cuda
} // namespace onnxruntime
#endif
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/training_ops/cpu/controlflow/group.h"
#include "core/providers/rocm/rocm_fwd.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
Group,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.TypeConstraint("B", DataTypeImpl::GetTensorType<bool>())
.TypeConstraint("T", DataTypeImpl::AllTensorTypes()),
onnxruntime::contrib::Group);
ONNX_OPERATOR_KERNEL_EX(
PassThrough,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllTensorTypes())
.VariadicAlias(0, 0), // outputs and inputs are mapped one to one
onnxruntime::contrib::PassThrough);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/training_ops/cpu/controlflow/group.h"
#include "core/providers/cuda/cuda_fwd.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
Group,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.TypeConstraint("B", DataTypeImpl::GetTensorType<bool>())
.TypeConstraint("T", DataTypeImpl::AllTensorTypes()),
onnxruntime::contrib::Group);
ONNX_OPERATOR_KERNEL_EX(
PassThrough,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllTensorTypes())
.VariadicAlias(0, 0), // outputs and inputs are mapped one to one
onnxruntime::contrib::PassThrough);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/controlflow/record.h"
#include "core/providers/cpu/tensor/utils.h"
// Include event mechanism shared by CPU and GPU implementations.
#include "orttraining/training_ops/cpu/controlflow/event_pool.h"
#include "orttraining/training_ops/cpu/controlflow/record.h"
#include "core/providers/rocm/nvtx_profile.h"
#include "core/providers/rocm/nvtx_profile_context.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
RecordEvent,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0) /* Keep EventIdentifier in CPU */
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1
RecordEvent);
Status RecordEvent::ComputeInternal(OpKernelContext* ctx) const {
#ifdef ENABLE_NVTX_PROFILE
const Tensor* event_id_tensor = ctx->Input<Tensor>(0);
const int64_t event_id = *(event_id_tensor->template Data<int64_t>());
auto& profile_context = profile::Context::GetInstance();
const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());
profile::NvtxRangeCreator range(
"Batch-" + tag + " Record-" + std::to_string(event_id), profile::Color::Magenta);
range.Begin();
#endif
// Reuse CPU helper to record event because event tensor is a CPU tensor.
onnxruntime::contrib::record_event_in_tensor(*ctx->Input<Tensor>(0));
ORT_ENFORCE(ctx->GetComputeStream());
for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {
// Output i_out is copied from input i_out + 1 (input 0 holds the event id).
const Tensor* X = ctx->Input<Tensor>(i_out + 1);
const TensorShape& data_shape = X->Shape();
Tensor* Y = ctx->Output(i_out, data_shape);
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));
}
#ifdef ENABLE_NVTX_PROFILE
range.End();
#endif
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/controlflow/record.h"
#include "core/providers/cpu/tensor/utils.h"
// Include event mechanism shared by CPU and GPU implementations.
#include "orttraining/training_ops/cpu/controlflow/event_pool.h"
#include "orttraining/training_ops/cpu/controlflow/record.h"
#include "core/providers/cuda/nvtx_profile.h"
#include "core/providers/cuda/nvtx_profile_context.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
RecordEvent,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0) /* Keep EventIdentifier in CPU */
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1
RecordEvent);
Status RecordEvent::ComputeInternal(OpKernelContext* ctx) const {
#ifdef ENABLE_NVTX_PROFILE
const Tensor* event_id_tensor = ctx->Input<Tensor>(0);
const int64_t event_id = *(event_id_tensor->template Data<int64_t>());
auto& profile_context = profile::Context::GetInstance();
const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());
profile::NvtxRangeCreator range(
"Batch-" + tag + " Record-" + std::to_string(event_id), profile::Color::Magenta);
range.Begin();
#endif
// Reuse CPU helper to record event because event tensor is a CPU tensor.
onnxruntime::contrib::record_event_in_tensor(*ctx->Input<Tensor>(0));
ORT_ENFORCE(ctx->GetComputeStream());
for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {
// Output i_out is copied from input i_out + 1 (input 0 holds the event id).
const Tensor* X = ctx->Input<Tensor>(i_out + 1);
const TensorShape& data_shape = X->Shape();
Tensor* Y = ctx->Output(i_out, data_shape);
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));
}
#ifdef ENABLE_NVTX_PROFILE
range.End();
#endif
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/miopen_common.h"
namespace onnxruntime {
namespace rocm {
class RecordEvent final : public RocmKernel {
public:
RecordEvent(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/cudnn_common.h"
namespace onnxruntime {
namespace cuda {
class RecordEvent final : public CudaKernel {
public:
RecordEvent(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/controlflow/wait.h"
#include "core/providers/cpu/tensor/utils.h"
// Include event mechanism shared by CPU and GPU implementations.
#include "orttraining/training_ops/cpu/controlflow/event_pool.h"
#include "orttraining/training_ops/cpu/controlflow/wait.h"
#include "core/providers/rocm/nvtx_profile.h"
#include "core/providers/rocm/nvtx_profile_context.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
WaitEvent,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0) /* CPU variable */
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1
WaitEvent);
Status WaitEvent::ComputeInternal(OpKernelContext* ctx) const {
#ifdef ENABLE_NVTX_PROFILE
const Tensor* event_id_tensor = ctx->Input<Tensor>(0);
const int64_t event_id = *(event_id_tensor->template Data<int64_t>());
auto& profile_context = profile::Context::GetInstance();
const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());
profile::NvtxRangeCreator range(
"Batch-" + tag + " Wait-" + std::to_string(event_id), profile::Color::Blue);
range.Begin();
#endif
// Reuse CPU helper to wait event because event tensor is a CPU tensor.
onnxruntime::contrib::wait_event_in_tensor(*ctx->Input<Tensor>(0));
ORT_ENFORCE(ctx->GetComputeStream());
for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {
// Output i_out is copied from input i_out + 1 (input 0 holds the event id).
const Tensor* X = ctx->Input<Tensor>(i_out + 1);
const TensorShape& data_shape = X->Shape();
Tensor* Y = ctx->Output(i_out, data_shape);
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));
}
#ifdef ENABLE_NVTX_PROFILE
range.End();
#endif
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/controlflow/wait.h"
#include "core/providers/cpu/tensor/utils.h"
// Include event mechanism shared by CPU and GPU implementations.
#include "orttraining/training_ops/cpu/controlflow/event_pool.h"
#include "orttraining/training_ops/cpu/controlflow/wait.h"
#include "core/providers/cuda/nvtx_profile.h"
#include "core/providers/cuda/nvtx_profile_context.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
WaitEvent,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0) /* CPU variable */
.TypeConstraint("TInt64", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.VariadicAlias(1, 0), // outputs and inputs are mapped one to one, with input offset by 1
WaitEvent);
Status WaitEvent::ComputeInternal(OpKernelContext* ctx) const {
#ifdef ENABLE_NVTX_PROFILE
const Tensor* event_id_tensor = ctx->Input<Tensor>(0);
const int64_t event_id = *(event_id_tensor->template Data<int64_t>());
auto& profile_context = profile::Context::GetInstance();
const auto tag = profile_context.GetThreadTagOrDefault(std::this_thread::get_id());
profile::NvtxRangeCreator range(
"Batch-" + tag + " Wait-" + std::to_string(event_id), profile::Color::Blue);
range.Begin();
#endif
// Reuse CPU helper to wait event because event tensor is a CPU tensor.
onnxruntime::contrib::wait_event_in_tensor(*ctx->Input<Tensor>(0));
ORT_ENFORCE(ctx->GetComputeStream());
for (int i_out = 0; i_out < ctx->OutputCount(); ++i_out) {
// Output i_out is copied from input i_out + 1 (input 0 holds the event id).
const Tensor* X = ctx->Input<Tensor>(i_out + 1);
const TensorShape& data_shape = X->Shape();
Tensor* Y = ctx->Output(i_out, data_shape);
ORT_RETURN_IF_ERROR(CopyTensor(*X, *Y, *ctx->GetComputeStream()));
}
#ifdef ENABLE_NVTX_PROFILE
range.End();
#endif
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/miopen_common.h"
namespace onnxruntime {
namespace rocm {
class WaitEvent final : public RocmKernel {
public:
WaitEvent(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/cudnn_common.h"
namespace onnxruntime {
namespace cuda {
class WaitEvent final : public CudaKernel {
public:
WaitEvent(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/training_ops/cpu/controlflow/yield.h"
#include "core/providers/rocm/rocm_fwd.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
YieldOp,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.ExternalOutputs(),
onnxruntime::contrib::YieldOp);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/training_ops/cpu/controlflow/yield.h"
#include "core/providers/cuda/cuda_fwd.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
YieldOp,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.TypeConstraint("T", DataTypeImpl::AllFixedSizeTensorTypes())
.ExternalOutputs(),
onnxruntime::contrib::YieldOp);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class CumSum final : public RocmKernel {
public:
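  // Attribute semantics follow the ONNX CumSum spec: exclusive = 1 excludes the
  // current element from its own sum; reverse = 1 accumulates from the end of the axis.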
explicit CumSum(const OpKernelInfo& info) : RocmKernel(info) {
// Process exclusive attribute
int64_t exclusive = 0;
auto status = info.GetAttr("exclusive", &exclusive);
if (status.IsOK()) {
if (exclusive == 1 || exclusive == 0) {
exclusive_ = (exclusive == 1);
} else {
ORT_ENFORCE("attribute exclusive can only be 0 or 1");
}
}
// Process reverse attribute
int64_t reverse = 0;
status = info.GetAttr("reverse", &reverse);
if (status.IsOK()) {
if (reverse == 1 || reverse == 0) {
reverse_ = (reverse == 1);
} else {
ORT_ENFORCE("attribute reverse can only be 0 or 1");
}
}
}
~CumSum() = default;
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
bool exclusive_ = false;
bool reverse_ = false;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class CumSum final : public CudaKernel {
public:
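  // Attribute semantics follow the ONNX CumSum spec: exclusive = 1 excludes the
  // current element from its own sum; reverse = 1 accumulates from the end of the axis.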
explicit CumSum(const OpKernelInfo& info) : CudaKernel(info) {
// Process exclusive attribute
int64_t exclusive = 0;
auto status = info.GetAttr("exclusive", &exclusive);
if (status.IsOK()) {
if (exclusive == 1 || exclusive == 0) {
exclusive_ = (exclusive == 1);
} else {
ORT_ENFORCE("attribute exclusive can only be 0 or 1");
}
}
// Process reverse attribute
int64_t reverse = 0;
status = info.GetAttr("reverse", &reverse);
if (status.IsOK()) {
if (reverse == 1 || reverse == 0) {
reverse_ = (reverse == 1);
} else {
ORT_ENFORCE("attribute reverse can only be 0 or 1");
}
}
}
~CumSum() = default;
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
bool exclusive_ = false;
bool reverse_ = false;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class GistBinarizeEncoderOp final : public RocmKernel {
public:
GistBinarizeEncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistBinarizeDecoderOp final : public RocmKernel {
public:
GistBinarizeDecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack1EncoderOp final : public RocmKernel {
public:
static constexpr int GIST_PACK1_FACTOR = 8;
GistPack1EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack1DecoderOp final : public RocmKernel {
public:
static constexpr int GIST_PACK1_FACTOR = 8;
GistPack1DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack8EncoderOp final : public RocmKernel {
public:
GistPack8EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack8DecoderOp final : public RocmKernel {
public:
GistPack8DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack16EncoderOp final : public RocmKernel {
public:
GistPack16EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack16DecoderOp final : public RocmKernel {
public:
GistPack16DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPackMsfp15EncoderOp final : public RocmKernel {
public:
GistPackMsfp15EncoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPackMsfp15DecoderOp final : public RocmKernel {
public:
GistPackMsfp15DecoderOp(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class GistBinarizeEncoderOp final : public CudaKernel {
public:
GistBinarizeEncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistBinarizeDecoderOp final : public CudaKernel {
public:
GistBinarizeDecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack1EncoderOp final : public CudaKernel {
public:
static constexpr int GIST_PACK1_FACTOR = 8;
GistPack1EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack1DecoderOp final : public CudaKernel {
public:
static constexpr int GIST_PACK1_FACTOR = 8;
GistPack1DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack8EncoderOp final : public CudaKernel {
public:
GistPack8EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack8DecoderOp final : public CudaKernel {
public:
GistPack8DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack16EncoderOp final : public CudaKernel {
public:
GistPack16EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPack16DecoderOp final : public CudaKernel {
public:
GistPack16DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPackMsfp15EncoderOp final : public CudaKernel {
public:
GistPackMsfp15EncoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class GistPackMsfp15DecoderOp final : public CudaKernel {
public:
GistPackMsfp15DecoderOp(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
static constexpr int GIST_PACK1_FACTOR = 8;
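// Pack1 packs eight single-bit values into each uint8_t of the encoded output,
// hence GIST_PACK1_FACTOR = 8.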
template <typename T>
void GistBinarizeEncoderImpl(
hipStream_t stream,
const T* input_data,
bool* output_data,
const size_t nums_of_elements);
template <typename T>
void GistBinarizeDecoderImpl(
hipStream_t stream,
const bool* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack1EncoderImpl(
hipStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack1DecoderImpl(
hipStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack8EncoderImpl(
hipStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack8DecoderImpl(
hipStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack16EncoderImpl(
hipStream_t stream,
const T* input_data,
half* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack16DecoderImpl(
hipStream_t stream,
const half* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPackMsfp15EncoderImpl(
hipStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size);
template <typename T>
void GistPackMsfp15DecoderImpl(
hipStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
static constexpr int GIST_PACK1_FACTOR = 8;
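// Pack1 packs eight single-bit values into each uint8_t of the encoded output,
// hence GIST_PACK1_FACTOR = 8.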
template <typename T>
void GistBinarizeEncoderImpl(
cudaStream_t stream,
const T* input_data,
bool* output_data,
const size_t nums_of_elements);
template <typename T>
void GistBinarizeDecoderImpl(
cudaStream_t stream,
const bool* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack1EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack1DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack8EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack8DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack16EncoderImpl(
cudaStream_t stream,
const T* input_data,
half* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPack16DecoderImpl(
cudaStream_t stream,
const half* input_data,
T* output_data,
const size_t nums_of_elements);
template <typename T>
void GistPackMsfp15EncoderImpl(
cudaStream_t stream,
const T* input_data,
uint8_t* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size);
template <typename T>
void GistPackMsfp15DecoderImpl(
cudaStream_t stream,
const uint8_t* input_data,
T* output_data,
const size_t pre_axis_size,
const size_t axis_size,
const size_t tile_size);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "orttraining/training_ops/cpu/loss/reduction_type.h"
#include "core/providers/rocm/reduction/reduction_ops.h"
#include "orttraining/training_ops/rocm/loss/softmaxcrossentropy_impl.h"
namespace onnxruntime {
namespace rocm {
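// Convention inferred from the signatures below: count is the number of label
// positions and label_depth is the number of classes, i.e. log_prob is laid out
// as [count, label_depth] and label holds one class index per position.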
template <typename T, typename TAcc, typename TLabel>
void SoftmaxCrossEntropyLossImpl(
hipStream_t stream,
const T* log_prob,
const TLabel* label,
const T* weight,
const TAcc* normalize_factor,
size_t count,
size_t label_depth,
int64_t ignore_index,
T* output_data);
template <typename T, typename TAcc, typename TLabel, typename TOut>
void SoftmaxCrossEntropyLossGradImpl(
hipStream_t stream,
const T* dY,
const T* log_prob,
const TLabel* label,
const T* weight,
const TAcc* normalize_factor,
const TOut* bias_data,
size_t count,
size_t label_depth,
bool reduction_none,
TOut* output_data);
template <typename T, typename TLabel, typename TOut>
void ComputeSoftmaxCrossEntropyWeightsImpl(
hipStream_t stream,
const TLabel* label,
const T* weight,
size_t count,
size_t label_depth,
int64_t ignore_index,
TOut* weight_data_nd);
template <typename T, typename TLabel, typename TOut>
class SoftmaxCrossEntropyLoss final : public LossBase {
public:
SoftmaxCrossEntropyLoss(const OpKernelInfo& info) : LossBase(info) {
int64_t default_ignore_index = -1;
info.GetAttrOrDefault<int64_t>("ignore_index", &ignore_index_, default_ignore_index);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t ignore_index_;
};
template <typename T, typename TLabel, typename TOut>
class SoftmaxCrossEntropyLossGrad final : public LossBase {
public:
SoftmaxCrossEntropyLossGrad(const OpKernelInfo& info) : LossBase(info) {
int64_t default_ignore_index = -1;
info.GetAttrOrDefault<int64_t>("ignore_index", &ignore_index_, default_ignore_index);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t ignore_index_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "orttraining/training_ops/cpu/loss/reduction_type.h"
#include "core/providers/cuda/reduction/reduction_ops.h"
#include "orttraining/training_ops/cuda/loss/softmaxcrossentropy_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename TAcc, typename TLabel>
void SoftmaxCrossEntropyLossImpl(
cudaStream_t stream,
const T* log_prob,
const TLabel* label,
const T* weight,
const TAcc* normalize_factor,
size_t count,
size_t label_depth,
int64_t ignore_index,
T* output_data);
template <typename T, typename TAcc, typename TLabel, typename TOut>
void SoftmaxCrossEntropyLossGradImpl(
cudaStream_t stream,
const T* dY,
const T* log_prob,
const TLabel* label,
const T* weight,
const TAcc* normalize_factor,
const TOut* bias_data,
size_t count,
size_t label_depth,
bool reduction_none,
TOut* output_data);
template <typename T, typename TLabel, typename TOut>
void ComputeSoftmaxCrossEntropyWeightsImpl(
cudaStream_t stream,
const TLabel* label,
const T* weight,
size_t count,
size_t label_depth,
int64_t ignore_index,
TOut* weight_data_nd);
template <typename T, typename TLabel, typename TOut>
class SoftmaxCrossEntropyLoss final : public LossBase {
public:
SoftmaxCrossEntropyLoss(const OpKernelInfo& info) : LossBase(info) {
int64_t default_ignore_index = -1;
info.GetAttrOrDefault<int64_t>("ignore_index", &ignore_index_, default_ignore_index);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t ignore_index_;
};
template <typename T, typename TLabel, typename TOut>
class SoftmaxCrossEntropyLossGrad final : public LossBase {
public:
SoftmaxCrossEntropyLossGrad(const OpKernelInfo& info) : LossBase(info) {
int64_t default_ignore_index = -1;
info.GetAttrOrDefault<int64_t>("ignore_index", &ignore_index_, default_ignore_index);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t ignore_index_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#pragma once
#include "orttraining/training_ops/cpu/loss/reduction_type.h"
#include "core/providers/rocm/reduction/reduction_ops.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void SoftMaxCrossEntropyImpl(
hipStream_t stream, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);
template <typename T>
void SoftMaxCrossEntropyGradImpl(
hipStream_t stream, const T* dY, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);
template <typename T, typename Tin>
void SparseSoftmaxCrossEntropyImpl(
hipStream_t stream, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);
template <typename T, typename Tin>
void SparseSoftmaxCrossEntropyGradImpl(
hipStream_t stream, const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);
class LossBase : public ReduceKernel<true> {
public:
explicit LossBase(const OpKernelInfo& info)
: ReduceKernel<true>(info, int64_t(0)) {
std::string reduction;
ORT_ENFORCE(info.GetAttr<std::string>("reduction", &reduction).IsOK());
reduction_ = StringToReductionType(reduction);
}
protected:
ReductionType reduction_;
};
template <typename T>
class SoftmaxCrossEntropy final : public LossBase {
public:
SoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class SoftmaxCrossEntropyGrad final : public LossBase {
public:
SoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename Tin>
class SparseSoftmaxCrossEntropy final : public LossBase {
public:
SparseSoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename Tin>
class SparseSoftmaxCrossEntropyGrad final : public LossBase {
public:
SparseSoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
}
} ### |
#pragma once
#include "orttraining/training_ops/cpu/loss/reduction_type.h"
#include "core/providers/cuda/reduction/reduction_ops.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void SoftMaxCrossEntropyImpl(
cudaStream_t stream, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);
template <typename T>
void SoftMaxCrossEntropyGradImpl(
cudaStream_t stream, const T* dY, const T* log_prob, const T* label, size_t normalize_factor, T* output_data, size_t count);
template <typename T, typename Tin>
void SparseSoftmaxCrossEntropyImpl(
cudaStream_t stream, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);
template <typename T, typename Tin>
void SparseSoftmaxCrossEntropyGradImpl(
cudaStream_t stream, const T* dY, const T* log_prob, const Tin* label, const T* weight, const T* normalize_factor, T* output_data, size_t count, size_t label_depth);
class LossBase : public ReduceKernel<true> {
public:
explicit LossBase(const OpKernelInfo& info)
: ReduceKernel<true>(info, int64_t(0)) {
std::string reduction;
ORT_ENFORCE(info.GetAttr<std::string>("reduction", &reduction).IsOK());
reduction_ = StringToReductionType(reduction);
}
protected:
ReductionType reduction_;
};
template <typename T>
class SoftmaxCrossEntropy final : public LossBase {
public:
SoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T>
class SoftmaxCrossEntropyGrad final : public LossBase {
public:
SoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename Tin>
class SparseSoftmaxCrossEntropy final : public LossBase {
public:
SparseSoftmaxCrossEntropy(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename Tin>
class SparseSoftmaxCrossEntropyGrad final : public LossBase {
public:
SparseSoftmaxCrossEntropyGrad(const OpKernelInfo& info) : LossBase(info) {
ORT_ENFORCE(reduction_ != ReductionType::NONE, "Loss with reduction 'none' is not implemented.");
}
Status ComputeInternal(OpKernelContext* context) const override;
};
}
}
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class BiasSoftmaxDropout final : public onnxruntime::rocm::RocmKernel {
public:
BiasSoftmaxDropout(const OpKernelInfo& info) : RocmKernel{info} {
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(1));
int64_t is_inner_broadcast_value;
ORT_ENFORCE(info.GetAttr<int64_t>("is_inner_broadcast", &is_inner_broadcast_value).IsOK());
is_inner_broadcast_ = is_inner_broadcast_value != 0;
int64_t seed = 0;
if (info.GetAttr<int64_t>("seed", &seed).IsOK()) {
generator_ = std::make_unique<PhiloxGenerator>(static_cast<uint64_t>(seed));
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
// For Softmax.
int64_t axis_;
bool is_inner_broadcast_;
// For Dropout.
mutable std::unique_ptr<PhiloxGenerator> generator_;
static constexpr float default_ratio_ = 0.5f;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class BiasSoftmaxDropout final : public onnxruntime::cuda::CudaKernel {
public:
BiasSoftmaxDropout(const OpKernelInfo& info) : CudaKernel{info} {
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(1));
int64_t is_inner_broadcast_value;
ORT_ENFORCE(info.GetAttr<int64_t>("is_inner_broadcast", &is_inner_broadcast_value).IsOK());
is_inner_broadcast_ = is_inner_broadcast_value != 0;
int64_t seed = 0;
if (info.GetAttr<int64_t>("seed", &seed).IsOK()) {
generator_ = std::make_unique<PhiloxGenerator>(static_cast<uint64_t>(seed));
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
// For Softmax.
int64_t axis_;
bool is_inner_broadcast_;
// For Dropout.
mutable std::unique_ptr<PhiloxGenerator> generator_;
static constexpr float default_ratio_ = 0.5f;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/framework/random_generator.h"
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
Status BiasSoftmaxDropoutImpl(hipStream_t stream, const hipDeviceProp_t& prop, miopenHandle_t miopen_handle,
T* dropout_output_data, bool* mask_data, T* softmax_output_data, const T* input_data,
const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,
int bias_broadcast_size, const float ratio, PhiloxGenerator& generator);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/framework/random_generator.h"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
Status BiasSoftmaxDropoutImpl(cudaStream_t stream, const cudaDeviceProp& prop, cudnnHandle_t cudnn_handle,
T* dropout_output_data, bool* mask_data, T* softmax_output_data, const T* input_data,
const T* bias_data, int element_count, int batch_count, bool is_inner_broadcast,
int bias_broadcast_size, const float ratio, PhiloxGenerator& generator);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/reduction/reduction_ops.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class DivGrad : public ReduceKernel<true> {  // TODO: do not derive from ReduceKernel.
                                             // Use a simple miopen reduce-sum helper instead.
public:
DivGrad(const OpKernelInfo& info) : ReduceKernel<true>(info, /*keep_dims_override*/ int64_t(0)) {}
Status ComputeInternal(OpKernelContext*) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/reduction/reduction_ops.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class DivGrad : public ReduceKernel<true> {  // TODO: do not derive from ReduceKernel.
                                             // Use a simple cudnn reduce-sum helper instead.
public:
DivGrad(const OpKernelInfo& info) : ReduceKernel<true>(info, /*keep_dims_override*/ int64_t(0)) {}
Status ComputeInternal(OpKernelContext*) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void ImplDivGradSimple(
hipStream_t stream,
SimpleBroadcast simpleBroadcast,
const T* a_data,
const T* b_data,
const T* dy_data,
size_t count,
T* da_output_data,
T* db_output_data);
template <typename T>
void ImplDivGradRhsPerChannelBatch1(
hipStream_t stream,
const T* a_data,
const T* b_data,
const T* dy_data,
size_t count,
const fast_divmod& fdm_H,
T* da_output_data,
T* db_output_data);
template <typename T>
void ImplDivGradRhsPerChannelBatchN(
hipStream_t stream,
const T* a_data,
const T* b_data,
const T* dy_data,
size_t count,
const fast_divmod& fdm_H,
const fast_divmod& fdm_C,
T* da_output_data,
T* db_output_data);
template <typename T>
void ImplDivGrad(
hipStream_t stream,
int32_t output_rank,
const TArray<int64_t>& a_padded_strides,
const T* a_data,
const TArray<int64_t>& b_padded_strides,
const T* b_data,
const T* dy_data,
size_t count,
const TArray<fast_divmod>& fdm_output_strides,
T* da_output_data,
T* db_output_data);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void ImplDivGradSimple(
cudaStream_t stream,
SimpleBroadcast simpleBroadcast,
const T* a_data,
const T* b_data,
const T* dy_data,
size_t count,
T* da_output_data,
T* db_output_data);
template <typename T>
void ImplDivGradRhsPerChannelBatch1(
cudaStream_t stream,
const T* a_data,
const T* b_data,
const T* dy_data,
size_t count,
const fast_divmod& fdm_H,
T* da_output_data,
T* db_output_data);
template <typename T>
void ImplDivGradRhsPerChannelBatchN(
cudaStream_t stream,
const T* a_data,
const T* b_data,
const T* dy_data,
size_t count,
const fast_divmod& fdm_H,
const fast_divmod& fdm_C,
T* da_output_data,
T* db_output_data);
template <typename T>
void ImplDivGrad(
cudaStream_t stream,
int32_t output_rank,
const TArray<int64_t>& a_padded_strides,
const T* a_data,
const TArray<int64_t>& b_padded_strides,
const T* b_data,
const T* dy_data,
size_t count,
const TArray<fast_divmod>& fdm_output_strides,
T* da_output_data,
T* db_output_data);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/math/isfinite.h"
#include "orttraining/training_ops/rocm/math/isfinite_impl.h"
using namespace ONNX_NAMESPACE;
using namespace onnxruntime::common;
namespace onnxruntime {
namespace rocm {
#define REGISTER_ISFINITE_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
IsFinite, \
kMSDomain, \
1, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<bool>()), \
IsFiniteOp<T>);
template <typename TSrc>
Status IsFiniteOp<TSrc>::ComputeInternal(OpKernelContext* context) const {
typedef typename ToHipType<TSrc>::MappedType HipTSrc;
const Tensor& input = *context->Input<Tensor>(0);
Tensor& output = *context->Output(0, input.Shape());
IsFinite(
Stream(context),
reinterpret_cast<const HipTSrc*>(input.Data<TSrc>()),
output.MutableData<bool>(), input.Shape().Size());
return Status::OK();
}
REGISTER_ISFINITE_KERNEL_TYPED(MLFloat16)
REGISTER_ISFINITE_KERNEL_TYPED(float)
REGISTER_ISFINITE_KERNEL_TYPED(double)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/math/isfinite.h"
#include "orttraining/training_ops/cuda/math/isfinite_impl.h"
using namespace ONNX_NAMESPACE;
using namespace onnxruntime::common;
namespace onnxruntime {
namespace cuda {
#define REGISTER_ISFINITE_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
IsFinite, \
kMSDomain, \
1, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()) \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("T1", DataTypeImpl::GetTensorType<bool>()), \
IsFiniteOp<T>);
template <typename TSrc>
Status IsFiniteOp<TSrc>::ComputeInternal(OpKernelContext* context) const {
typedef typename ToCudaType<TSrc>::MappedType CudaTSrc;
const Tensor& input = *context->Input<Tensor>(0);
Tensor& output = *context->Output(0, input.Shape());
IsFinite(
Stream(context),
reinterpret_cast<const CudaTSrc*>(input.Data<TSrc>()),
output.MutableData<bool>(), input.Shape().Size());
return Status::OK();
}
REGISTER_ISFINITE_KERNEL_TYPED(MLFloat16)
REGISTER_ISFINITE_KERNEL_TYPED(float)
REGISTER_ISFINITE_KERNEL_TYPED(double)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename TSrc>
class IsFiniteOp final : public RocmKernel {
public:
IsFiniteOp(const OpKernelInfo& info) : RocmKernel(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename TSrc>
class IsFiniteOp final : public CudaKernel {
public:
IsFiniteOp(const OpKernelInfo& info) : CudaKernel(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
void CumSumImpl(
hipStream_t stream,
const T* input_data,
const fast_divmod& input_dim_along_axis,
const fast_divmod& input_stride_along_axis,
T* output_data,
int64_t output_size,
bool exclusive,
bool reverse);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
void CumSumImpl(
cudaStream_t stream,
const T* input_data,
const fast_divmod& input_dim_along_axis,
const fast_divmod& input_stride_along_axis,
T* output_data,
int64_t output_size,
bool exclusive,
bool reverse);
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "isfinite_impl.h"
#include <hip/hip_fp16.h>
#include "core/providers/rocm/cu_inc/common.cuh"
#include "contrib_ops/rocm/math/isfinite.cuh"
namespace onnxruntime {
namespace rocm {
template <typename TSrc>
__global__ void _IsFinite(const TSrc* input, bool* output, HIP_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output[id] = IsFiniteScalar(input[id]);
}
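// Launches one thread per element; threads past the end exit early via the index check in the kernel.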
template <typename TSrc>
void IsFinite(hipStream_t stream, const TSrc* input, bool* output, size_t count) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
HIP_LONG N = static_cast<HIP_LONG>(count);
_IsFinite<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input, output, N);
}
#define SPECIALIZE_ISFINITE_IMPL(T) \
template void IsFinite(hipStream_t stream, const T* input, bool* output, size_t count);
SPECIALIZE_ISFINITE_IMPL(half)
SPECIALIZE_ISFINITE_IMPL(float)
SPECIALIZE_ISFINITE_IMPL(double)
} // namespace rocm
} // namespace onnxruntime### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "isfinite_impl.h"
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/math/isfinite.cuh"
namespace onnxruntime {
namespace cuda {
template <typename TSrc>
__global__ void _IsFinite(const TSrc* input, bool* output, CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output[id] = IsFiniteScalar(input[id]);
}
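// Launches one thread per element; threads past the end exit early via the index check in the kernel.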
template <typename TSrc>
void IsFinite(cudaStream_t stream, const TSrc* input, bool* output, size_t count) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
_IsFinite<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(input, output, N);
}
#define SPECIALIZE_ISFINITE_IMPL(T) \
template void IsFinite(cudaStream_t stream, const T* input, bool* output, size_t count);
SPECIALIZE_ISFINITE_IMPL(half)
SPECIALIZE_ISFINITE_IMPL(float)
SPECIALIZE_ISFINITE_IMPL(double)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename TSrc>
void IsFinite(hipStream_t stream, const TSrc* input, bool* output, size_t N);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename TSrc>
void IsFinite(cudaStream_t stream, const TSrc* input, bool* output, size_t N);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename SrcT>
class MixedPrecisionScale final : public RocmKernel {
public:
MixedPrecisionScale(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
private:
ONNX_NAMESPACE::TensorProto_DataType to_;
size_t bytes_per_output_elem_;
bool fuse_outputs_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename SrcT>
class MixedPrecisionScale final : public CudaKernel {
public:
MixedPrecisionScale(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
private:
ONNX_NAMESPACE::TensorProto_DataType to_;
size_t bytes_per_output_elem_;
bool fuse_outputs_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include "mixed_precision_scale_impl.h"
#include <hip/hip_fp16.h>
#include "core/providers/rocm/cu_inc/common.cuh"
namespace onnxruntime {
namespace rocm {
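// scale_data points to a single device-visible scalar; each element is multiplied by it and cast to DstT.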
template <typename SrcT, typename DstT>
__global__ void _MixedPrecisionScale(
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
HIP_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = static_cast<DstT>(*scale_data * static_cast<float>(input_data[id]));
}
template <typename SrcT, typename DstT>
void Impl_MixedPrecisionScale(
hipStream_t stream,
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
size_t count){
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock));
HIP_LONG N = static_cast<HIP_LONG>(count);
_MixedPrecisionScale<SrcT, DstT><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
scale_data,
output_data,
N);
}
#define SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(SrcT, DstT) \
template void Impl_MixedPrecisionScale<SrcT, DstT>( \
hipStream_t stream, \
const SrcT* input_data, \
const float* scale_data, \
DstT* output_data, \
size_t count);
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, BFloat16)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include "mixed_precision_scale_impl.h"
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
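// scale_data points to a single device-visible scalar; each element is multiplied by it and cast to DstT.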
template <typename SrcT, typename DstT>
__global__ void _MixedPrecisionScale(
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
output_data[id] = static_cast<DstT>(*scale_data * static_cast<float>(input_data[id]));
}
template <typename SrcT, typename DstT>
void Impl_MixedPrecisionScale(
cudaStream_t stream,
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
size_t count){
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
_MixedPrecisionScale<SrcT, DstT><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
scale_data,
output_data,
N);
}
#define SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(SrcT, DstT) \
template void Impl_MixedPrecisionScale<SrcT, DstT>( \
cudaStream_t stream, \
const SrcT* input_data, \
const float* scale_data, \
DstT* output_data, \
size_t count);
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, float)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(float, BFloat16)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(BFloat16, half)
SPECIALIZE_MIXEDPRECISIONSCALE_IMPL(half, BFloat16)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename SrcT, typename DstT>
void Impl_MixedPrecisionScale(
hipStream_t stream,
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
size_t count);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename SrcT, typename DstT>
void Impl_MixedPrecisionScale(
cudaStream_t stream,
const SrcT* input_data,
const float* scale_data,
DstT* output_data,
size_t count);
}
} // namespace onnxruntime
### |
#include "orttraining/training_ops/rocm/math/scale.h"
#include "orttraining/training_ops/rocm/math/scale_impl.h"
using namespace ONNX_NAMESPACE;
using namespace onnxruntime::common;
namespace onnxruntime {
namespace rocm {
#define REGISTER_SCALE_KERNEL_TYPED(T)                                          \
  ONNX_OPERATOR_TYPED_KERNEL_EX(                                                 \
      Scale, kMSDomain, 1, T, kRocmExecutionProvider,                            \
      (*KernelDefBuilder::Create())                                              \
          .TypeConstraint("T", DataTypeImpl::GetTensorType<T>())                 \
          .TypeConstraint("ScaleT", {DataTypeImpl::GetTensorType<float>(),       \
                                     DataTypeImpl::GetTensorType<double>(),      \
                                     DataTypeImpl::GetTensorType<MLFloat16>(),   \
                                     DataTypeImpl::GetTensorType<int64_t>(),     \
                                     DataTypeImpl::GetTensorType<int32_t>()})    \
          .InputMemoryType(OrtMemTypeCPUInput, 1),                               \
      Scale<T>);
template <typename ScaleT>
struct GetScaleValueImpl {
void operator()(const Tensor* scale, float& scale_value) const {
ORT_ENFORCE(scale->Shape().Size() == 1, "Scale input should have a single value.");
scale_value = static_cast<float>(*(scale->template Data<ScaleT>()));
ORT_ENFORCE(scale_value != 0.0f, "Scale value must not be 0.");
}
};
template <typename T>
Scale<T>::Scale(const OpKernelInfo& info) : RocmKernel(info) {
int64_t scale_down;
info.GetAttrOrDefault("scale_down", &scale_down, static_cast<int64_t>(0));
scale_down_ = (scale_down != 0);
}
template <typename T>
Status Scale<T>::ComputeInternal(OpKernelContext* context) const {
typedef typename ToHipType<T>::MappedType HipT;
float scale_value;
auto scale_tensor = context->Input<Tensor>(1);
utils::MLTypeCallDispatcher<float, double, MLFloat16, int64_t, int32_t> t_disp(scale_tensor->GetElementType());
t_disp.Invoke<GetScaleValueImpl>(scale_tensor, scale_value);
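  // scale_down means the input is divided by the scale value rather than multiplied by it.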
if (scale_down_) {
scale_value = 1.0f / scale_value;
}
auto lhs_tensor = context->Input<Tensor>(0);
auto output_tensor = context->Output(0, lhs_tensor->Shape());
Impl_Scale<HipT>(
Stream(context), reinterpret_cast<const HipT*>(lhs_tensor->template Data<T>()), scale_value, reinterpret_cast<HipT*>(output_tensor->template MutableData<T>()), output_tensor->Shape().Size());
return Status::OK();
}
REGISTER_SCALE_KERNEL_TYPED(MLFloat16)
REGISTER_SCALE_KERNEL_TYPED(float)
REGISTER_SCALE_KERNEL_TYPED(double)
template Status Scale<MLFloat16>::ComputeInternal(OpKernelContext* context) const;
template Status Scale<float>::ComputeInternal(OpKernelContext* context) const;
template Status Scale<double>::ComputeInternal(OpKernelContext* context) const;
}
} ### |
#include "orttraining/training_ops/cuda/math/scale.h"
#include "orttraining/training_ops/cuda/math/scale_impl.h"
using namespace ONNX_NAMESPACE;
using namespace onnxruntime::common;
namespace onnxruntime {
namespace cuda {
#define REGISTER_SCALE_KERNEL_TYPED(T)                                          \
  ONNX_OPERATOR_TYPED_KERNEL_EX(                                                 \
      Scale, kMSDomain, 1, T, kCudaExecutionProvider,                            \
      (*KernelDefBuilder::Create())                                              \
          .TypeConstraint("T", DataTypeImpl::GetTensorType<T>())                 \
          .TypeConstraint("ScaleT", {DataTypeImpl::GetTensorType<float>(),       \
                                     DataTypeImpl::GetTensorType<double>(),      \
                                     DataTypeImpl::GetTensorType<MLFloat16>(),   \
                                     DataTypeImpl::GetTensorType<int64_t>(),     \
                                     DataTypeImpl::GetTensorType<int32_t>()})    \
          .InputMemoryType(OrtMemTypeCPUInput, 1),                               \
      Scale<T>);
template <typename ScaleT>
struct GetScaleValueImpl {
void operator()(const Tensor* scale, float& scale_value) const {
ORT_ENFORCE(scale->Shape().Size() == 1, "Scale input should have a single value.");
scale_value = static_cast<float>(*(scale->template Data<ScaleT>()));
ORT_ENFORCE(scale_value != 0.0f, "Scale value must not be 0.");
}
};
template <typename T>
Scale<T>::Scale(const OpKernelInfo& info) : CudaKernel(info) {
int64_t scale_down;
info.GetAttrOrDefault("scale_down", &scale_down, static_cast<int64_t>(0));
scale_down_ = (scale_down != 0);
}
template <typename T>
Status Scale<T>::ComputeInternal(OpKernelContext* context) const {
typedef typename ToCudaType<T>::MappedType CudaT;
float scale_value;
auto scale_tensor = context->Input<Tensor>(1);
utils::MLTypeCallDispatcher<float, double, MLFloat16, int64_t, int32_t> t_disp(scale_tensor->GetElementType());
t_disp.Invoke<GetScaleValueImpl>(scale_tensor, scale_value);
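  // scale_down means the input is divided by the scale value rather than multiplied by it.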
if (scale_down_) {
scale_value = 1.0f / scale_value;
}
auto lhs_tensor = context->Input<Tensor>(0);
auto output_tensor = context->Output(0, lhs_tensor->Shape());
Impl_Scale<CudaT>(
Stream(context), reinterpret_cast<const CudaT*>(lhs_tensor->template Data<T>()), scale_value, reinterpret_cast<CudaT*>(output_tensor->template MutableData<T>()), output_tensor->Shape().Size());
return Status::OK();
}
REGISTER_SCALE_KERNEL_TYPED(MLFloat16)
REGISTER_SCALE_KERNEL_TYPED(float)
REGISTER_SCALE_KERNEL_TYPED(double)
template Status Scale<MLFloat16>::ComputeInternal(OpKernelContext* context) const;
template Status Scale<float>::ComputeInternal(OpKernelContext* context) const;
template Status Scale<double>::ComputeInternal(OpKernelContext* context) const;
}
}
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class Scale final : public RocmKernel {
public:
Scale(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool scale_down_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class Scale final : public CudaKernel {
public:
Scale(const OpKernelInfo& info);
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool scale_down_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/math/scale_impl.h"
#include "core/providers/rocm/cu_inc/common.cuh"
namespace onnxruntime {
namespace rocm {
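// Each thread covers NumElementsPerThread elements: inputs are first staged into registers, then the scaled values are written back.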
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _Scale(
const T* input_data,
const T scale_value,
T* output_data,
HIP_LONG N) {
HIP_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
T input_value[NumElementsPerThread];
HIP_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
input_value[i] = input_data[id];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = input_value[i] * scale_value;
id += NumThreadsPerBlock;
}
}
}
template <typename T>
void Impl_Scale(
hipStream_t stream,
const T* input_data,
const float scale_value,
T* output_data,
size_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
HIP_LONG N = static_cast<HIP_LONG>(count);
_Scale<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
static_cast<T>(scale_value),
output_data,
N);
}
#define SPECIALIZE_SCALE_IMPL(T) \
template void Impl_Scale<T>( \
hipStream_t stream, \
const T* input_data, \
const float scale_value, \
T* output_data, \
size_t count);
SPECIALIZE_SCALE_IMPL(half)
SPECIALIZE_SCALE_IMPL(float)
SPECIALIZE_SCALE_IMPL(double)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/math/scale_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
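// Each thread covers NumElementsPerThread elements: inputs are first staged into registers, then the scaled values are written back.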
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _Scale(
const T* input_data,
const T scale_value,
T* output_data,
CUDA_LONG N) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
T input_value[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
input_value[i] = input_data[id];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = input_value[i] * scale_value;
id += NumThreadsPerBlock;
}
}
}
template <typename T>
void Impl_Scale(
cudaStream_t stream,
const T* input_data,
const float scale_value,
T* output_data,
size_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
_Scale<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
input_data,
static_cast<T>(scale_value),
output_data,
N);
}
#define SPECIALIZE_SCALE_IMPL(T) \
template void Impl_Scale<T>( \
cudaStream_t stream, \
const T* input_data, \
const float scale_value, \
T* output_data, \
size_t count);
SPECIALIZE_SCALE_IMPL(half)
SPECIALIZE_SCALE_IMPL(float)
SPECIALIZE_SCALE_IMPL(double)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename T>
void Impl_Scale(
hipStream_t stream,
const T* input_data,
const float scale_value,
T* output_data,
size_t count);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename T>
void Impl_Scale(
cudaStream_t stream,
const T* input_data,
const float scale_value,
T* output_data,
size_t count);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class SoftmaxDropoutGrad final : public RocmKernel {
public:
SoftmaxDropoutGrad(const OpKernelInfo& info) : RocmKernel(info) {
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(1));
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
static constexpr float default_ratio_ = 0.5f;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class SoftmaxDropoutGrad final : public CudaKernel {
public:
SoftmaxDropoutGrad(const OpKernelInfo& info) : CudaKernel(info) {
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(1));
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
static constexpr float default_ratio_ = 0.5f;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class Gemm final : public RocmKernel {
using Base = RocmKernel;
public:
Gemm(const OpKernelInfo& info) : RocmKernel(info) {
int64_t temp;
ORT_ENFORCE(info.GetAttr<int64_t>("transA", &temp).IsOK());
trans_A_ = (temp != 0);
ORT_ENFORCE(info.GetAttr<int64_t>("transB", &temp).IsOK());
trans_B_ = (temp != 0);
ORT_ENFORCE(info.GetAttr<float>("alpha", &alpha_).IsOK());
ORT_ENFORCE(info.GetAttr<float>("beta", &beta_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool trans_A_;
bool trans_B_;
float alpha_;
float beta_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class Gemm final : public CudaKernel {
using Base = CudaKernel;
public:
Gemm(const OpKernelInfo& info) : CudaKernel(info) {
int64_t temp;
ORT_ENFORCE(info.GetAttr<int64_t>("transA", &temp).IsOK());
trans_A_ = (temp != 0);
ORT_ENFORCE(info.GetAttr<int64_t>("transB", &temp).IsOK());
trans_B_ = (temp != 0);
ORT_ENFORCE(info.GetAttr<float>("alpha", &alpha_).IsOK());
ORT_ENFORCE(info.GetAttr<float>("beta", &beta_).IsOK());
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool trans_A_;
bool trans_B_;
float alpha_;
float beta_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
Status SoftmaxDropoutGradImpl(hipStream_t stream, miopenHandle_t miopen_handle, T* input_grad_data,
const T* output_grad_data, const bool* mask_data, const T* softmax_output_data,
int element_count, int batch_count, const float ratio);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
Status SoftmaxDropoutGradImpl(cudaStream_t stream, cudnnHandle_t cudnn_handle, T* input_grad_data,
const T* output_grad_data, const bool* mask_data, const T* softmax_output_data,
int element_count, int batch_count, const float ratio);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class SoftmaxGrad final : public RocmKernel {
public:
SoftmaxGrad(const OpKernelInfo& info) : RocmKernel{info} {
const auto& op_type = info.node().OpType();
is_since_opset_13_ = (op_type == "SoftmaxGrad_13" || op_type == "LogSoftmaxGrad_13");
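    // The *_13 variants follow opset-13 semantics, where the default softmax axis is -1 instead of 1.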
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(is_since_opset_13_ ? -1 : 1));
is_log_softmax_ = (op_type == "LogSoftmaxGrad" || op_type == "LogSoftmaxGrad_13");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
bool is_log_softmax_;
bool is_since_opset_13_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class SoftmaxGrad final : public CudaKernel {
public:
SoftmaxGrad(const OpKernelInfo& info) : CudaKernel{info} {
const auto& op_type = info.node().OpType();
is_since_opset_13_ = (op_type == "SoftmaxGrad_13" || op_type == "LogSoftmaxGrad_13");
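    // The *_13 variants follow opset-13 semantics, where the default softmax axis is -1 instead of 1.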
info.GetAttrOrDefault("axis", &axis_, static_cast<int64_t>(is_since_opset_13_ ? -1 : 1));
is_log_softmax_ = (op_type == "LogSoftmaxGrad" || op_type == "LogSoftmaxGrad_13");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t axis_;
bool is_log_softmax_;
bool is_since_opset_13_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
Status SoftmaxGradImpl(hipStream_t stream, miopenHandle_t miopen_handle, T* input_grad, const T* output_grad,
const T* softmax_output, int element_count, int batch_count, bool is_log_softmax);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
Status SoftmaxGradImpl(cudaStream_t stream, cudnnHandle_t cudnn_handle, T* input_grad, const T* output_grad,
const T* softmax_output, int element_count, int batch_count, bool is_log_softmax);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <bool UseBitmask>
class DropoutGrad final : public RocmKernel {
public:
DropoutGrad(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
static constexpr float default_ratio_ = 0.5f;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <bool UseBitmask>
class DropoutGrad final : public CudaKernel {
public:
DropoutGrad(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
static constexpr float default_ratio_ = 0.5f;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
namespace onnxruntime {
namespace rocm {
template <typename T>
void DropoutGradientKernelImpl(hipStream_t stream, const int64_t N, const T* dY_data, const void* mask_data,
const float ratio, T* dX_data, bool use_bitmask);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
namespace onnxruntime {
namespace cuda {
template <typename T>
void DropoutGradientKernelImpl(cudaStream_t stream, const int64_t N, const T* dY_data, const void* mask_data,
const float ratio, T* dX_data, bool use_bitmask);
} // namespace cuda
} // namespace onnxruntime
### |
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T, typename U, typename V, bool simplified>
class LayerNormGrad final : public RocmKernel {
public:
LayerNormGrad(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
int64_t axis_;
};
template <typename T, typename U, typename V>
class InvertibleLayerNormGrad final : public RocmKernel {
public:
InvertibleLayerNormGrad(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
int64_t axis_;
};
} // namespace rocm
} // namespace onnxruntime### |
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename U, typename V, bool simplified>
class LayerNormGrad final : public CudaKernel {
public:
LayerNormGrad(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
int64_t axis_;
};
template <typename T, typename U, typename V>
class InvertibleLayerNormGrad final : public CudaKernel {
public:
InvertibleLayerNormGrad(const OpKernelInfo& op_kernel_info);
Status ComputeInternal(OpKernelContext* ctx) const override;
private:
int64_t axis_;
};
} // namespace cuda
} // namespace onnxruntime
### |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T, typename U, typename V, bool simplified>
void HostLayerNormGradient(
const hipDeviceProp_t& prop,
hipStream_t stream,
const V* dout,
const T* input,
const V* output,
const V* gamma,
const V* beta,
const U* mean,
const U* invvar,
int64_t n1,
int64_t n2,
T* grad_input,
V* grad_gamma,
V* grad_beta,
U* part_grad_gamma,
U* part_grad_beta,
const int part_size);
} // namespace rocm
} // namespace onnxruntime
### |
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename U, typename V, bool simplified>
void HostLayerNormGradient(
const cudaDeviceProp& prop,
cudaStream_t stream,
const V* dout,
const T* input,
const V* output,
const V* gamma,
const V* beta,
const U* mean,
const U* invvar,
int64_t n1,
int64_t n2,
T* grad_input,
V* grad_gamma,
V* grad_beta,
U* part_grad_gamma,
U* part_grad_beta,
const int part_size);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
class AdamOptimizer final : public RocmKernel {
public:
AdamOptimizer(const OpKernelInfo& info) : RocmKernel(info) {
info.GetAttrOrDefault("alpha", &alpha_, 0.9f);
info.GetAttrOrDefault("beta", &beta_, 0.999f);
info.GetAttrOrDefault("lambda", &lambda_, 0.0f);
info.GetAttrOrDefault("epsilon", &epsilon_, 1e-8f);
info.GetAttrOrDefault("max_norm_clip", &max_norm_clip_, 1.0f);
int64_t tmp_flag = static_cast<int64_t>(0);
ORT_ENFORCE(info.GetAttr<int64_t>("do_bias_correction", &tmp_flag).IsOK(), "Missing/Invalid do_bias_correction");
ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, "do_bias_correction must be either 0 or 1.");
ORT_ENFORCE(max_norm_clip_ != 0, "max_norm_clip must NOT be 0.");
do_bias_correction_ = (tmp_flag != 0);
info.GetAttrOrDefault("weight_decay_mode", &weight_decay_mode_, static_cast<int64_t>(0));
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
float alpha_;
float beta_;
float lambda_;
float epsilon_;
float max_norm_clip_;
bool do_bias_correction_;
int64_t weight_decay_mode_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
class AdamOptimizer final : public CudaKernel {
public:
AdamOptimizer(const OpKernelInfo& info) : CudaKernel(info) {
info.GetAttrOrDefault("alpha", &alpha_, 0.9f);
info.GetAttrOrDefault("beta", &beta_, 0.999f);
info.GetAttrOrDefault("lambda", &lambda_, 0.0f);
info.GetAttrOrDefault("epsilon", &epsilon_, 1e-8f);
info.GetAttrOrDefault("max_norm_clip", &max_norm_clip_, 1.0f);
int64_t tmp_flag = static_cast<int64_t>(0);
ORT_ENFORCE(info.GetAttr<int64_t>("do_bias_correction", &tmp_flag).IsOK(), "Missing/Invalid do_bias_correction");
ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, "do_bias_correction must be either 0 or 1.");
ORT_ENFORCE(max_norm_clip_ != 0, "max_norm_clip must NOT be 0.");
do_bias_correction_ = (tmp_flag != 0);
info.GetAttrOrDefault("weight_decay_mode", &weight_decay_mode_, static_cast<int64_t>(0));
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
float alpha_;
float beta_;
float lambda_;
float epsilon_;
float max_norm_clip_;
bool do_bias_correction_;
int64_t weight_decay_mode_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
void AdamOptimizerImpl(
hipStream_t stream,
const T1* eta,
const T2 update_count,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const float alpha,
const float beta,
const float lambda,
const float epsilon,
const float max_norm,
const bool do_bias_correction,
const int64_t weight_decay_mode,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
size_t count);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cstdint>
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
void AdamOptimizerImpl(
cudaStream_t stream,
const T1* eta,
const T2 update_count,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const float alpha,
const float beta,
const float lambda,
const float epsilon,
const float max_norm,
const bool do_bias_correction,
const int64_t weight_decay_mode,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
size_t count);
}
} // namespace onnxruntime
### |
#include <memory>
#include <utility>
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/training_ops/rocm/optimizer/adamw/adamw.h"
#include "orttraining/training_ops/rocm/optimizer/adamw/adamw_impl.h"
#include "orttraining/training_ops/rocm/optimizer/common.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
AdamWOptimizer, kMSDomain, 1, kRocmExecutionProvider, (*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1)
.InputMemoryType(OrtMemTypeCPUInput, 6)
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.Alias(2, 1)
.Alias(4, 2)
.Alias(5, 3)
.TypeConstraint("T1", DataTypeImpl::GetTensorType<float>())
.TypeConstraint("T2", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("S_WEIGHT", DataTypeImpl::AllFixedSizeSequenceTensorTypes())
.TypeConstraint("S_GRAD", DataTypeImpl::AllFixedSizeSequenceTensorTypes())
.TypeConstraint("S_MOMENT", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), AdamWOptimizer);
Status AdamWOptimizer::ComputeInternal(OpKernelContext* ctx) const {
AdamWOptimizerBase::Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));
int64_t* updated_flag_ptr = p.updated_flag->template MutableData<int64_t>();
const Tensor* update_signal = ctx->Input<Tensor>(6);
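  // If the optional update signal (input 6) is present and false, skip the optimizer step and only report that no update was applied.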
if (update_signal == nullptr || *update_signal->template Data<bool>()) {
typedef typename ToHipType<float>::MappedType HipT_FLOAT;
typedef AdamWMTAFunctor<HipT_FLOAT, HipT_FLOAT, HipT_FLOAT> TFunctor;
TFunctor functor;
const float* lr_ptr = p.learning_rate->template Data<float>();
const int64_t* step_ptr = p.step->template Data<int64_t>();
ORT_ENFORCE(lr_ptr && step_ptr);
launch_multi_tensor_functor<MTA_ADAMW_GROUP_SIZE, TFunctor>(
Stream(ctx), MTA_ADAMW_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor, alpha_, beta_, epsilon_, *lr_ptr, weight_decay_, adam_mode_, correct_bias_, *step_ptr);
*updated_flag_ptr = 1;
} else {
*updated_flag_ptr = 0;
}
if (p.updated_weights != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));
}
if (p.updated_momentums_1 != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.momentums_1, p.updated_momentums_1));
}
if (p.updated_momentums_2 != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.momentums_2, p.updated_momentums_2));
}
return Status::OK();
}
}
}
### |
#include <memory>
#include <utility>
#include "core/providers/shared_library/provider_api.h"
#include "orttraining/training_ops/cuda/optimizer/adamw/adamw.h"
#include "orttraining/training_ops/cuda/optimizer/adamw/adamw_impl.h"
#include "orttraining/training_ops/cuda/optimizer/common.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
AdamWOptimizer, kMSDomain, 1, kCudaExecutionProvider, (*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0)
.InputMemoryType(OrtMemTypeCPUInput, 1)
.InputMemoryType(OrtMemTypeCPUInput, 6)
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.Alias(2, 1)
.Alias(4, 2)
.Alias(5, 3)
.TypeConstraint("T1", DataTypeImpl::GetTensorType<float>())
.TypeConstraint("T2", DataTypeImpl::GetTensorType<int64_t>())
.TypeConstraint("S_WEIGHT", DataTypeImpl::AllFixedSizeSequenceTensorTypes())
.TypeConstraint("S_GRAD", DataTypeImpl::AllFixedSizeSequenceTensorTypes())
.TypeConstraint("S_MOMENT", DataTypeImpl::AllFixedSizeSequenceTensorTypes()), AdamWOptimizer);
Status AdamWOptimizer::ComputeInternal(OpKernelContext* ctx) const {
AdamWOptimizerBase::Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));
int64_t* updated_flag_ptr = p.updated_flag->template MutableData<int64_t>();
const Tensor* update_signal = ctx->Input<Tensor>(6);
if (update_signal == nullptr || *update_signal->template Data<bool>()) {
typedef typename ToCudaType<float>::MappedType CudaT_FLOAT;
typedef AdamWMTAFunctor<CudaT_FLOAT, CudaT_FLOAT, CudaT_FLOAT> TFunctor;
TFunctor functor;
const float* lr_ptr = p.learning_rate->template Data<float>();
const int64_t* step_ptr = p.step->template Data<int64_t>();
ORT_ENFORCE(lr_ptr && step_ptr);
launch_multi_tensor_functor<MTA_ADAMW_GROUP_SIZE, TFunctor>(
Stream(ctx), MTA_ADAMW_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor, alpha_, beta_, epsilon_, *lr_ptr, weight_decay_, adam_mode_, correct_bias_, *step_ptr);
*updated_flag_ptr = 1;
} else {
*updated_flag_ptr = 0;
}
if (p.updated_weights != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));
}
if (p.updated_momentums_1 != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.momentums_1, p.updated_momentums_1));
}
if (p.updated_momentums_2 != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.momentums_2, p.updated_momentums_2));
}
return Status::OK();
}
}
}
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class MatMul final : public RocmKernel {
using Base = RocmKernel;
public:
MatMul(const OpKernelInfo& info)
: RocmKernel(info),
alpha_{info.GetAttrOrDefault<float>("alpha", 1.0f)},
trans_A_{info.GetAttrOrDefault<int64_t>("transA", 0) != 0},
trans_B_{info.GetAttrOrDefault<int64_t>("transB", 0) != 0},
trans_batch_a_{info.GetAttrOrDefault<int64_t>("transBatchA", 0) != 0},
trans_batch_b_{info.GetAttrOrDefault<int64_t>("transBatchB", 0) != 0} {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
const float alpha_;
const bool trans_A_;
const bool trans_B_;
const bool trans_batch_a_;
const bool trans_batch_b_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class MatMul final : public CudaKernel {
using Base = CudaKernel;
public:
MatMul(const OpKernelInfo& info)
: CudaKernel(info),
alpha_{info.GetAttrOrDefault<float>("alpha", 1.0f)},
trans_A_{info.GetAttrOrDefault<int64_t>("transA", 0) != 0},
trans_B_{info.GetAttrOrDefault<int64_t>("transB", 0) != 0},
trans_batch_a_{info.GetAttrOrDefault<int64_t>("transBatchA", 0) != 0},
trans_batch_b_{info.GetAttrOrDefault<int64_t>("transBatchB", 0) != 0} {}
Status ComputeInternal(OpKernelContext* context) const override;
private:
const float alpha_;
const bool trans_A_;
const bool trans_B_;
const bool trans_batch_a_;
const bool trans_batch_b_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <vector>
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "orttraining/training_ops/cpu/optimizer/adamw/adamwbase.h"
namespace onnxruntime {
namespace rocm {
class AdamWOptimizer final : public RocmKernel, public contrib::AdamWOptimizerBase {
public:
AdamWOptimizer(const OpKernelInfo& info) : RocmKernel(info), contrib::AdamWOptimizerBase(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <vector>
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "orttraining/training_ops/cpu/optimizer/adamw/adamwbase.h"
namespace onnxruntime {
namespace cuda {
class AdamWOptimizer final : public CudaKernel, public contrib::AdamWOptimizerBase {
public:
AdamWOptimizer(const OpKernelInfo& info) : CudaKernel(info), contrib::AdamWOptimizerBase(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/multi_tensor/common.cuh"
namespace onnxruntime {
namespace rocm {
#define MTA_ADAMW_GROUP_SIZE 4
#define MTA_ADAMW_CHUNK_SIZE (2048 * 32)
template <typename T_WEIGHT, typename T_GRAD, typename T_MOMENTUM>
struct AdamWMTAFunctor {
void operator()(hipStream_t stream,
ChunkGroup<MTA_ADAMW_GROUP_SIZE> chunks,
const float alpha,
const float beta,
const float epsilon,
const float lr,
const float decay,
const int64_t adam_mode,
const int64_t correct_bias,
const int64_t update_count);
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/multi_tensor/common.cuh"
namespace onnxruntime {
namespace cuda {
#define MTA_ADAMW_GROUP_SIZE 4
#define MTA_ADAMW_CHUNK_SIZE (2048 * 32)
template <typename T_WEIGHT, typename T_GRAD, typename T_MOMENTUM>
struct AdamWMTAFunctor {
void operator()(cudaStream_t stream,
ChunkGroup<MTA_ADAMW_GROUP_SIZE> chunks,
const float alpha,
const float beta,
const float epsilon,
const float lr,
const float decay,
const int64_t adam_mode,
const int64_t correct_bias,
const int64_t update_count);
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <string>
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class InplaceClipGradNorm final : public RocmKernel {
public:
InplaceClipGradNorm(const OpKernelInfo& info) : RocmKernel(info) {
info.GetAttrOrDefault("max_norm", &max_norm_, 1.0f);
info.GetAttrOrDefault("norm_type", &norm_type_, std::string("fro"));
ORT_ENFORCE(norm_type_ == "fro", "Given norm type ", norm_type_, " is not supported for InplaceClipGradNorm.");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
float max_norm_;
std::string norm_type_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <string>
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class InplaceClipGradNorm final : public CudaKernel {
public:
InplaceClipGradNorm(const OpKernelInfo& info) : CudaKernel(info) {
info.GetAttrOrDefault("max_norm", &max_norm_, 1.0f);
info.GetAttrOrDefault("norm_type", &norm_type_, std::string("fro"));
ORT_ENFORCE(norm_type_ == "fro", "Given norm type ", norm_type_, " is not supported for InplaceClipGradNorm.");
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
float max_norm_;
std::string norm_type_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <algorithm>
#include "core/providers/rocm/rocm_common.h"
#include "orttraining/training_ops/rocm/optimizer/clip_grad_norm/clip_grad_norm_impl.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
__global__ void ClipGradNorm(
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int tensor_idx = chunks.block_index_to_tensor_group_index[blockIdx.x];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[blockIdx.x];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
T* gradients_chunk_ptr = static_cast<T*>(chunks.tensor_ptrs[0][tensor_idx]) + chunk_start_idx;
#pragma unroll 4
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
float clip_coefficient = max_norm / (*total_norm + epsilon);
gradients_chunk_ptr[i] = static_cast<T>(gradients_chunk_ptr[i]) *
static_cast<T>(fminf(clip_coefficient, 1.0f));
}
}
template <typename T>
void ClipGradNormFunctor<T>::operator()(
hipStream_t stream,
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int num_blocks_per_grid = chunks.chunk_count;
const int num_threads_per_block = ChunkGroup<ClipGradNormGroupSize>::thread_count_per_block;
ClipGradNorm<T><<<num_blocks_per_grid, num_threads_per_block, 0, stream>>>(chunks, total_norm, epsilon, max_norm);
}
#define SPECIALIZE_CLIPGRADNORM_FUNCTOR(T) \
template void ClipGradNormFunctor<T>::operator()(hipStream_t stream, \
ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm); \
\
template __global__ void ClipGradNorm<T>(ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm);
SPECIALIZE_CLIPGRADNORM_FUNCTOR(float);
#undef SPECIALIZE_CLIPGRADNORM_FUNCTOR
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <algorithm>
#include "core/providers/cuda/cuda_common.h"
#include "orttraining/training_ops/cuda/optimizer/clip_grad_norm/clip_grad_norm_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void ClipGradNorm(
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int tensor_idx = chunks.block_index_to_tensor_group_index[blockIdx.x];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[blockIdx.x];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
T* gradients_chunk_ptr = static_cast<T*>(chunks.tensor_ptrs[0][tensor_idx]) + chunk_start_idx;
#pragma unroll 4
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
float clip_coefficient = max_norm / (*total_norm + epsilon);
gradients_chunk_ptr[i] = static_cast<T>(gradients_chunk_ptr[i]) *
static_cast<T>(fminf(clip_coefficient, 1.0f));
}
}
template <typename T>
void ClipGradNormFunctor<T>::operator()(
cudaStream_t stream,
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int num_blocks_per_grid = chunks.chunk_count;
const int num_threads_per_block = ChunkGroup<ClipGradNormGroupSize>::thread_count_per_block;
ClipGradNorm<T><<<num_blocks_per_grid, num_threads_per_block, 0, stream>>>(chunks, total_norm, epsilon, max_norm);
}
#define SPECIALIZE_CLIPGRADNORM_FUNCTOR(T) \
template void ClipGradNormFunctor<T>::operator()(cudaStream_t stream, \
ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm); \
\
template __global__ void ClipGradNorm<T>(ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm);
SPECIALIZE_CLIPGRADNORM_FUNCTOR(float);
#undef SPECIALIZE_CLIPGRADNORM_FUNCTOR
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/multi_tensor/common.cuh"
namespace onnxruntime {
namespace rocm {
constexpr int ClipGradNormGroupSize = 1;
template <typename T>
struct ClipGradNormFunctor {
void operator()(hipStream_t stream,
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* l2_norm,
const float epsilon,
const float max_norm);
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/multi_tensor/common.cuh"
namespace onnxruntime {
namespace cuda {
constexpr int ClipGradNormGroupSize = 1;
template <typename T>
struct ClipGradNormFunctor {
void operator()(cudaStream_t stream,
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* l2_norm,
const float epsilon,
const float max_norm);
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/optimizer/common.h"
namespace onnxruntime {
namespace rocm {
Status CopyIfNotSameROCMBuffer(OpKernelContext* ctx, size_t number_of_values,
const TensorSeq* values, TensorSeq* updated_values) {
if (values != updated_values) {
AllocatorPtr alloc;
ORT_ENFORCE(ctx->GetTempSpaceAllocator(&alloc).IsOK(),
"ROCM CopyIfNotSameBuffer for tensor sequence: Unable to get an allocator.");
hipStream_t hip_stream = ctx->GetComputeStream()
? static_cast<hipStream_t>(ctx->GetComputeStream()->GetHandle())
: nullptr;
updated_values->SetType(values->DataType());
updated_values->Reserve(number_of_values);
for (size_t input_idx = 0; input_idx < number_of_values; ++input_idx) {
const Tensor& source_tensor = values->Get(input_idx);
std::unique_ptr<Tensor> target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);
HIP_RETURN_IF_ERROR(hipMemcpyAsync(target_tensor->MutableDataRaw(),
source_tensor.DataRaw(),
source_tensor.SizeInBytes(),
hipMemcpyDeviceToDevice, hip_stream));
updated_values->Add(std::move(*target_tensor)); // Add will check for type consistency
}
}
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/optimizer/common.h"
namespace onnxruntime {
namespace cuda {
Status CopyIfNotSameCUDABuffer(OpKernelContext* ctx, size_t number_of_values,
const TensorSeq* values, TensorSeq* updated_values) {
if (values != updated_values) {
AllocatorPtr alloc;
ORT_ENFORCE(ctx->GetTempSpaceAllocator(&alloc).IsOK(),
"CUDA CopyIfNotSameBuffer for tensor sequence: Unable to get an allocator.");
cudaStream_t cuda_stream = ctx->GetComputeStream()
? static_cast<cudaStream_t>(ctx->GetComputeStream()->GetHandle())
: nullptr;
updated_values->SetType(values->DataType());
updated_values->Reserve(number_of_values);
for (size_t input_idx = 0; input_idx < number_of_values; ++input_idx) {
const Tensor& source_tensor = values->Get(input_idx);
std::unique_ptr<Tensor> target_tensor = Tensor::Create(source_tensor.DataType(), source_tensor.Shape(), alloc);
CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target_tensor->MutableDataRaw(),
source_tensor.DataRaw(),
source_tensor.SizeInBytes(),
cudaMemcpyDeviceToDevice, cuda_stream));
updated_values->Add(std::move(*target_tensor)); // Add will check for type consistency
}
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
namespace onnxruntime {
namespace rocm {
// ---------------------------------------------------------------------------
// _ComputeGradScale -- helper to calculate gradient scales based on global norms
// ---------------------------------------------------------------------------
template <typename TLossScale, typename TGradNorm, typename TFinalScale>
__device__ __forceinline__ TFinalScale _ComputeGradScale(
const TLossScale* loss_scale, // Scale of the gradient (called "scaled_g_norm" below)
const TGradNorm* scaled_g_norm, // Scaled gradient norm is an optimizer input
const TFinalScale max_g_norm) {
const TFinalScale scale = loss_scale != nullptr ? TFinalScale(*loss_scale) : TFinalScale(1.f);
const TFinalScale scaled_max_g_norm = TFinalScale(scale * max_g_norm);
// This number is used to divide the scaled gradient before applying optimizers.
TFinalScale scaled_g_scaling_factor = scale;
if (scaled_g_norm != nullptr && TFinalScale(*scaled_g_norm) > scaled_max_g_norm) {
scaled_g_scaling_factor = TFinalScale(*scaled_g_norm) / max_g_norm;
}
return scaled_g_scaling_factor;
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
#include <cuda_fp16.h>
namespace onnxruntime {
namespace cuda {
// ---------------------------------------------------------------------------
// _ComputeGradScale -- helper to calculate gradient scales based on global norms
// ---------------------------------------------------------------------------
template <typename TLossScale, typename TGradNorm, typename TFinalScale>
__device__ __forceinline__ TFinalScale _ComputeGradScale(
const TLossScale* loss_scale, // Scale of the gradient (called "scaled_g_norm" below)
const TGradNorm* scaled_g_norm, // Scaled gradient norm is an optimizer input
const TFinalScale max_g_norm) {
const TFinalScale scale = loss_scale != nullptr ? TFinalScale(*loss_scale) : TFinalScale(1.f);
const TFinalScale scaled_max_g_norm = TFinalScale(scale * max_g_norm);
// This number is used to divide the scaled gradient before applying optimizers.
TFinalScale scaled_g_scaling_factor = scale;
if (scaled_g_norm != nullptr && TFinalScale(*scaled_g_norm) > scaled_max_g_norm) {
scaled_g_scaling_factor = TFinalScale(*scaled_g_norm) / max_g_norm;
}
return scaled_g_scaling_factor;
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
Status CopyIfNotSameBuffer(hipStream_t stream, const Tensor& source_tensor, Tensor& target_tensor) {
const T* source = source_tensor.template Data<T>();
T* target = target_tensor.template MutableData<T>();
if (target != source) {
HIP_RETURN_IF_ERROR(hipMemcpyAsync(target, source, source_tensor.SizeInBytes(), hipMemcpyDeviceToDevice,
stream));
}
return Status::OK();
}
Status CopyIfNotSameROCMBuffer(OpKernelContext* ctx, size_t number_of_values, const TensorSeq* values,
TensorSeq* updated_values);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
Status CopyIfNotSameBuffer(cudaStream_t stream, const Tensor& source_tensor, Tensor& target_tensor) {
const T* source = source_tensor.template Data<T>();
T* target = target_tensor.template MutableData<T>();
if (target != source) {
CUDA_RETURN_IF_ERROR(cudaMemcpyAsync(target, source, source_tensor.SizeInBytes(), cudaMemcpyDeviceToDevice,
stream));
}
return Status::OK();
}
Status CopyIfNotSameCUDABuffer(OpKernelContext* ctx, size_t number_of_values, const TensorSeq* values,
TensorSeq* updated_values);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class ZeroGradient final : public RocmKernel {
public:
ZeroGradient(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename T_GRAD>
class InPlaceAccumulator final : public RocmKernel {
public:
InPlaceAccumulator(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename T_GRAD>
class InPlaceAccumulatorV2 final : public RocmKernel {
public:
InPlaceAccumulatorV2(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class ZeroGradient final : public CudaKernel {
public:
ZeroGradient(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename T_GRAD>
class InPlaceAccumulator final : public CudaKernel {
public:
InPlaceAccumulator(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
template <typename T, typename T_GRAD>
class InPlaceAccumulatorV2 final : public CudaKernel {
public:
InPlaceAccumulatorV2(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
// Implementation can be found in rocm file
template <typename T, typename T_GRAD>
void InPlaceAccumulatorImpl(
hipStream_t stream,
const T* gradient_buffer,
const T_GRAD* gradient,
T* accumulated_gradient,
size_t count);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
// Implementation can be found in cuda file
template <typename T, typename T_GRAD>
void InPlaceAccumulatorImpl(
cudaStream_t stream,
const T* gradient_buffer,
const T_GRAD* gradient,
T* accumulated_gradient,
size_t count);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "matmul_integer.h"
#include "core/providers/cpu/math/matmul_helper.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/shared_inc/rocm_utils.h"
namespace onnxruntime {
namespace rocm {
Status ReduceRowSumOnMatrixA(hipStream_t stream, const int8_t* matrix, int32_t* row_sum, const int8_t offset, const MatMulComputeHelper& helper);
Status ReduceColSumOnMatrixB(hipStream_t stream, const int8_t* matrix, int32_t* col_sum, const int8_t offset, const MatMulComputeHelper& helper);
Status OffsetOutput(hipStream_t stream,
const int32_t* row_sum,
const int32_t* col_sum,
int32_t* output,
const int8_t a_offset,
const int8_t b_offset,
const MatMulComputeHelper& helper);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "matmul_integer.h"
#include "core/providers/cpu/math/matmul_helper.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/cuda_utils.h"
namespace onnxruntime {
namespace cuda {
Status ReduceRowSumOnMatrixA(cudaStream_t stream, const int8_t* matrix, int32_t* row_sum, const int8_t offset, const MatMulComputeHelper& helper);
Status ReduceColSumOnMatrixB(cudaStream_t stream, const int8_t* matrix, int32_t* col_sum, const int8_t offset, const MatMulComputeHelper& helper);
Status OffsetOutput(cudaStream_t stream,
const int32_t* row_sum,
const int32_t* col_sum,
int32_t* output,
const int8_t a_offset,
const int8_t b_offset,
const MatMulComputeHelper& helper);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
class LambOptimizer final : public RocmKernel {
public:
LambOptimizer(const OpKernelInfo& info) : RocmKernel(info) {
alpha_ = info.GetAttrsOrDefault("alpha", std::vector<float>(1024, 0.9f));
beta_ = info.GetAttrsOrDefault("beta", std::vector<float>(1024, 0.999f));
lambda_ = info.GetAttrsOrDefault("lambda", std::vector<float>(1024, 0.0f));
epsilon_ = info.GetAttrsOrDefault("epsilon", std::vector<float>(1024, 1e-6f));
max_norm_clip_ = info.GetAttrsOrDefault("max_norm_clip", std::vector<float>(1024, 1.0f));
ORT_ENFORCE(info.GetAttr<float>("ratio_min", &ratio_min_).IsOK(), "Missing/Invalid 'ratio_min' attribute value");
ORT_ENFORCE(info.GetAttr<float>("ratio_max", &ratio_max_).IsOK(), "Missing/Invalid 'ratio_max' attribute value");
for (const auto& max_norm : max_norm_clip_) {
ORT_ENFORCE(max_norm != 0, "max_norm_clip must NOT be 0.");
}
int64_t tmp_flag = static_cast<int64_t>(0);
ORT_ENFORCE(info.GetAttr<int64_t>("do_bias_correction", &tmp_flag).IsOK(), "Missing/Invalid do_bias_correction");
ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, "do_bias_correction must be either 0 or 1.");
do_bias_correction_ = tmp_flag != 0;
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
std::vector<float> alpha_;
std::vector<float> beta_;
std::vector<float> lambda_;
std::vector<float> epsilon_;
std::vector<float> max_norm_clip_;
float ratio_min_;
float ratio_max_;
bool do_bias_correction_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
class LambOptimizer final : public CudaKernel {
public:
LambOptimizer(const OpKernelInfo& info) : CudaKernel(info) {
alpha_ = info.GetAttrsOrDefault("alpha", std::vector<float>(1024, 0.9f));
beta_ = info.GetAttrsOrDefault("beta", std::vector<float>(1024, 0.999f));
lambda_ = info.GetAttrsOrDefault("lambda", std::vector<float>(1024, 0.0f));
epsilon_ = info.GetAttrsOrDefault("epsilon", std::vector<float>(1024, 1e-6f));
max_norm_clip_ = info.GetAttrsOrDefault("max_norm_clip", std::vector<float>(1024, 1.0f));
ORT_ENFORCE(info.GetAttr<float>("ratio_min", &ratio_min_).IsOK(), "Missing/Invalid 'ratio_min' attribute value");
ORT_ENFORCE(info.GetAttr<float>("ratio_max", &ratio_max_).IsOK(), "Missing/Invalid 'ratio_max' attribute value");
for (const auto& max_norm : max_norm_clip_) {
ORT_ENFORCE(max_norm != 0, "max_norm_clip must NOT be 0.");
}
int64_t tmp_flag = static_cast<int64_t>(0);
ORT_ENFORCE(info.GetAttr<int64_t>("do_bias_correction", &tmp_flag).IsOK(), "Missing/Invalid do_bias_correction");
ORT_ENFORCE(tmp_flag == 0 || tmp_flag == 1, "do_bias_correction must be either 0 or 1.");
do_bias_correction_ = tmp_flag != 0;
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
std::vector<float> alpha_;
std::vector<float> beta_;
std::vector<float> lambda_;
std::vector<float> epsilon_;
std::vector<float> max_norm_clip_;
float ratio_min_;
float ratio_max_;
bool do_bias_correction_;
};
} // namespace cuda
} // namespace onnxruntime
### |
#pragma once
#include <hip/hip_runtime.h>
#include "core/providers/rocm/rocm_kernel.h"
#include "core/providers/rocm/multi_tensor/common.cuh"
#include "core/framework/stream_handles.h"
namespace onnxruntime {
namespace rocm {
template <typename T1, typename T2, typename T3, typename T_GRAD_NORM>
void LambComputeDirection(
hipStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count);
template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP>
void LambUpdate(
hipStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count);
template <typename T1, typename T2, typename T3, typename T_GRAD_NORM>
struct LambMultiTensorComputeDirectionFunctor {
void operator()(
hipStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* grad_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction);
};
template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf>
struct LambMultiTensorReductionFunctor {
void operator()(
hipStream_t stream, ChunkGroup<4> chunk_group, const RocmKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size, onnxruntime::Stream* ort_stream);
};
struct LambMultiTensorSyncRangeAndLock {
int leading_block;
int number_blocks;
int completed_blocks;
};
template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP>
struct LambMultiTensorUpdateFunctor {
void operator()(
hipStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max);
};
}
}
### |
#pragma once
#include <cuda_runtime.h>
#include "core/providers/cuda/cuda_kernel.h"
#include "core/providers/cuda/multi_tensor/common.cuh"
#include "core/framework/stream_handles.h"
namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2, typename T3, typename T_GRAD_NORM>
void LambComputeDirection(
cudaStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, float alpha, float beta, float lambda, float epsilon, float max_norm, float alpha_correction, float beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count);
template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP>
void LambUpdate(
cudaStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count);
template <typename T1, typename T2, typename T3, typename T_GRAD_NORM>
struct LambMultiTensorComputeDirectionFunctor {
void operator()(
cudaStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* grad_norm, const float lambda, const float alpha, const float beta, const float epsilon, const float max_norm, const float alpha_correction, const float beta_correction);
};
template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf>
struct LambMultiTensorReductionFunctor {
void operator()(
cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size, onnxruntime::Stream* ort_stream);
};
struct LambMultiTensorSyncRangeAndLock {
int leading_block;
int number_blocks;
int completed_blocks;
};
template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP>
struct LambMultiTensorUpdateFunctor {
void operator()(
cudaStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max);
};
}
}
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "sg.h"
#include "sg_impl.h"
#include "core/providers/rocm/reduction/reduction_functions.h"
#include "core/providers/rocm/math/binary_elementwise_ops.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
SGDOptimizer,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(1, 0) // Update weights in-place
.Alias(2, 1) // Update gradients in-place
.TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
SGDOptimizer);
Status SGDOptimizer::ComputeInternal(OpKernelContext* ctx) const {
const Tensor& ETA = *ctx->Input<Tensor>(0);
const Tensor& W = *ctx->Input<Tensor>(1);
const Tensor& G = *ctx->Input<Tensor>(2);
Tensor* NW = ctx->Output(0, W.Shape());
Tensor* NG = ctx->Output(1, G.Shape());
ORT_ENFORCE(W.Shape() == G.Shape());
SGDOptimizerImpl(
Stream(ctx),
ETA.template Data<float>(),
W.template Data<float>(),
G.template Data<float>(),
NW != nullptr ? NW->template MutableData<float>() : nullptr,
NG != nullptr ? NG->template MutableData<float>() : nullptr,
W.Shape().Size());
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "sg.h"
#include "sg_impl.h"
#include "core/providers/cuda/reduction/reduction_functions.h"
#include "core/providers/cuda/math/binary_elementwise_ops.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
SGDOptimizer,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.Alias(1, 0) // Update weights in-place
.Alias(2, 1) // Update gradients in-place
.TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
SGDOptimizer);
Status SGDOptimizer::ComputeInternal(OpKernelContext* ctx) const {
const Tensor& ETA = *ctx->Input<Tensor>(0);
const Tensor& W = *ctx->Input<Tensor>(1);
const Tensor& G = *ctx->Input<Tensor>(2);
Tensor* NW = ctx->Output(0, W.Shape());
Tensor* NG = ctx->Output(1, G.Shape());
ORT_ENFORCE(W.Shape() == G.Shape());
SGDOptimizerImpl(
Stream(ctx),
ETA.template Data<float>(),
W.template Data<float>(),
G.template Data<float>(),
NW != nullptr ? NW->template MutableData<float>() : nullptr,
NG != nullptr ? NG->template MutableData<float>() : nullptr,
W.Shape().Size());
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
class SGDOptimizer final : public RocmKernel {
public:
SGDOptimizer(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
class SGDOptimizer final : public CudaKernel {
public:
SGDOptimizer(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "sg_impl.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "core/providers/rocm/atomic/common.cuh"
namespace onnxruntime {
namespace rocm {
template <typename T>
__global__ void _SGDOptimizer(
const T* eta,
const T* weights,
const T* gradients,
T* weights_out,
T* gradients_out,
HIP_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
const T delta = -(*eta) * gradients[id];
if (gradients_out) {
gradients_out[id] = delta;
}
if (weights_out) {
weights_out[id] = weights[id] + delta;
}
}
template <typename T>
void SGDOptimizerImpl(
hipStream_t stream,
const T* eta,
const T* weights,
const T* gradients,
T* weights_out,
T* gradients_out,
size_t count) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
HIP_LONG N = static_cast<HIP_LONG>(count);
_SGDOptimizer<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
eta,
weights,
gradients,
weights_out,
gradients_out,
N);
}
#define SPECIALIZED_IMPL__SGDOptimizerImpl(T) \
template void SGDOptimizerImpl( \
hipStream_t stream, \
const T* eta, \
const T* weights, \
const T* gradients, \
T* weights_out, \
T* gradients_out, \
size_t count);
SPECIALIZED_IMPL__SGDOptimizerImpl(float)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "sg_impl.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/atomic/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void _SGDOptimizer(
const T* eta,
const T* weights,
const T* gradients,
T* weights_out,
T* gradients_out,
CUDA_LONG N) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
const T delta = -(*eta) * gradients[id];
if (gradients_out) {
gradients_out[id] = delta;
}
if (weights_out) {
weights_out[id] = weights[id] + delta;
}
}
template <typename T>
void SGDOptimizerImpl(
cudaStream_t stream,
const T* eta,
const T* weights,
const T* gradients,
T* weights_out,
T* gradients_out,
size_t count) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
_SGDOptimizer<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(
eta,
weights,
gradients,
weights_out,
gradients_out,
N);
}
#define SPECIALIZED_IMPL__SGDOptimizerImpl(T) \
template void SGDOptimizerImpl( \
cudaStream_t stream, \
const T* eta, \
const T* weights, \
const T* gradients, \
T* weights_out, \
T* gradients_out, \
size_t count);
SPECIALIZED_IMPL__SGDOptimizerImpl(float)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename T>
void SGDOptimizerImpl(
hipStream_t stream,
const T* eta,
const T* weights,
const T* gradients,
T* weight_out,
T* gradients_out,
size_t count);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename T>
void SGDOptimizerImpl(
cudaStream_t stream,
const T* eta,
const T* weights,
const T* gradients,
T* weight_out,
T* gradients_out,
size_t count);
}
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/optimizer/common.h"
#include "orttraining/training_ops/rocm/optimizer/sgd/sgd.h"
#include "orttraining/training_ops/rocm/optimizer/sgd/sgd_impl.h"
namespace onnxruntime {
namespace rocm {
ONNX_OPERATOR_KERNEL_EX(
SGDOptimizerV2,
kMSDomain,
1,
kRocmExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0)
.InputMemoryType(OrtMemTypeCPUInput, 3)
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.Alias(1, 1) // Update weights in-place
.TypeConstraint("T1", DataTypeImpl::GetTensorType<float>())
.TypeConstraint("T_BOOL", DataTypeImpl::GetTensorType<bool>())
.TypeConstraint("S_WEIGHT", DataTypeImpl::AllFixedSizeSequenceTensorTypes())
.TypeConstraint("S_GRAD", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),
SGDOptimizerV2);
Status SGDOptimizerV2::ComputeInternal(OpKernelContext* ctx) const {
SGDOptimizerV2Base::Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));
bool* updated_flag_ptr = p.update_completed->template MutableData<bool>();
// Currently placed on CPU; need to revisit when we have mixed precision training requirements.
const Tensor* update_signal = ctx->Input<Tensor>(3);
if (update_signal == nullptr || *update_signal->template Data<bool>()) {
typedef typename ToHipType<float>::MappedType HipT_FLOAT;
typedef SGDMTAFunctor<HipT_FLOAT, HipT_FLOAT> TFunctor;
TFunctor functor;
const float* lr_ptr = p.learning_rate->template Data<float>();
ORT_ENFORCE(lr_ptr);
launch_multi_tensor_functor<MTA_SGD_GROUP_SIZE, TFunctor>(
Stream(ctx), MTA_SGD_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor,
*lr_ptr);
*updated_flag_ptr = true;
} else {
*updated_flag_ptr = false;
}
if (p.updated_weights != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameROCMBuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));
}
return Status::OK();
}
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/optimizer/common.h"
#include "orttraining/training_ops/cuda/optimizer/sgd/sgd.h"
#include "orttraining/training_ops/cuda/optimizer/sgd/sgd_impl.h"
namespace onnxruntime {
namespace cuda {
ONNX_OPERATOR_KERNEL_EX(
SGDOptimizerV2,
kMSDomain,
1,
kCudaExecutionProvider,
(*KernelDefBuilder::Create())
.InputMemoryType(OrtMemTypeCPUInput, 0)
.InputMemoryType(OrtMemTypeCPUInput, 3)
.OutputMemoryType(OrtMemTypeCPUOutput, 0)
.Alias(1, 1) // Update weights in-place
.TypeConstraint("T1", DataTypeImpl::GetTensorType<float>())
.TypeConstraint("T_BOOL", DataTypeImpl::GetTensorType<bool>())
.TypeConstraint("S_WEIGHT", DataTypeImpl::AllFixedSizeSequenceTensorTypes())
.TypeConstraint("S_GRAD", DataTypeImpl::AllFixedSizeSequenceTensorTypes()),
SGDOptimizerV2);
Status SGDOptimizerV2::ComputeInternal(OpKernelContext* ctx) const {
SGDOptimizerV2Base::Prepare p;
ORT_RETURN_IF_ERROR(PrepareForCompute(ctx, p));
bool* updated_flag_ptr = p.update_completed->template MutableData<bool>();
// Currently placed on CPU; need to revisit when we have mixed precision training requirements.
const Tensor* update_signal = ctx->Input<Tensor>(3);
if (update_signal == nullptr || *update_signal->template Data<bool>()) {
typedef typename ToCudaType<float>::MappedType CudaT_FLOAT;
typedef SGDMTAFunctor<CudaT_FLOAT, CudaT_FLOAT> TFunctor;
TFunctor functor;
const float* lr_ptr = p.learning_rate->template Data<float>();
ORT_ENFORCE(lr_ptr);
launch_multi_tensor_functor<MTA_SGD_GROUP_SIZE, TFunctor>(
Stream(ctx), MTA_SGD_CHUNK_SIZE, p.grouped_tensor_sizes, p.grouped_tensor_pointers, functor,
*lr_ptr);
*updated_flag_ptr = true;
} else {
*updated_flag_ptr = false;
}
if (p.updated_weights != nullptr) {
ORT_RETURN_IF_ERROR(CopyIfNotSameCUDABuffer(ctx, p.num_of_weights, p.weights, p.updated_weights));
}
return Status::OK();
}
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
#include "orttraining/training_ops/cpu/optimizer/sgd/sgdbase.h"
namespace onnxruntime {
namespace rocm {
class SGDOptimizerV2 final : public RocmKernel, public contrib::SGDOptimizerV2Base {
public:
SGDOptimizerV2(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
#include "orttraining/training_ops/cpu/optimizer/sgd/sgdbase.h"
namespace onnxruntime {
namespace cuda {
class SGDOptimizerV2 final : public CudaKernel, public contrib::SGDOptimizerV2Base {
public:
SGDOptimizerV2(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/optimizer/sgd/sgd_impl.h"
#include "core/providers/rocm/rocm_common.h"
#include "core/providers/rocm/cu_inc/common.cuh"
namespace onnxruntime {
namespace rocm {
template <typename T_WEIGHT, typename T_GRAD>
__global__ void SGDCompute(ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, const float lr) {
const int block_idx = blockIdx.x;
T_WEIGHT* weight_chunk_ptr;
T_GRAD* grad_chunk_ptr;
int chunk_size;
const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
T_WEIGHT* weight_tensor_ptr = static_cast<T_WEIGHT*>(chunks.tensor_ptrs[0][tensor_idx]);
T_GRAD* grad_tensor_ptr = static_cast<T_GRAD*>(chunks.tensor_ptrs[1][tensor_idx]);
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
weight_chunk_ptr = weight_tensor_ptr + chunk_start_idx;
grad_chunk_ptr = grad_tensor_ptr + chunk_start_idx;
#pragma unroll 4
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
float w = static_cast<float>(weight_chunk_ptr[i]);
float g = static_cast<float>(grad_chunk_ptr[i]);
w = w + -lr * g;
// Update the new weight.
weight_chunk_ptr[i] = static_cast<T_WEIGHT>(w);
}
}
template <typename T_WEIGHT, typename T_GRAD>
void SGDMTAFunctor<T_WEIGHT, T_GRAD>::operator()(hipStream_t stream,
ChunkGroup<MTA_SGD_GROUP_SIZE> chunks,
const float lr) {
const int block_count = chunks.chunk_count;
const int thread_count = ChunkGroup<MTA_SGD_GROUP_SIZE>::thread_count_per_block;
SGDCompute<T_WEIGHT, T_GRAD><<<block_count, thread_count, 0, stream>>>(chunks, lr);
}
#define INSTANTIATE_SGD_FUNCTOR(T_WEIGHT, T_GRAD) \
template void SGDMTAFunctor<T_WEIGHT, T_GRAD>::operator()(hipStream_t stream, \
ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, \
const float lr); \
template __global__ void SGDCompute<T_WEIGHT, T_GRAD>(ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, \
const float lr);
INSTANTIATE_SGD_FUNCTOR(float, float)
#undef INSTANTIATE_SGD_FUNCTOR
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/optimizer/sgd/sgd_impl.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
template <typename T_WEIGHT, typename T_GRAD>
__global__ void SGDCompute(ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, const float lr) {
const int block_idx = blockIdx.x;
T_WEIGHT* weight_chunk_ptr;
T_GRAD* grad_chunk_ptr;
int chunk_size;
const int tensor_idx = chunks.block_index_to_tensor_group_index[block_idx];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
T_WEIGHT* weight_tensor_ptr = static_cast<T_WEIGHT*>(chunks.tensor_ptrs[0][tensor_idx]);
T_GRAD* grad_tensor_ptr = static_cast<T_GRAD*>(chunks.tensor_ptrs[1][tensor_idx]);
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[block_idx];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
weight_chunk_ptr = weight_tensor_ptr + chunk_start_idx;
grad_chunk_ptr = grad_tensor_ptr + chunk_start_idx;
#pragma unroll 4
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
float w = static_cast<float>(weight_chunk_ptr[i]);
float g = static_cast<float>(grad_chunk_ptr[i]);
w = w + -lr * g;
// Update the new weight.
weight_chunk_ptr[i] = static_cast<T_WEIGHT>(w);
}
}
template <typename T_WEIGHT, typename T_GRAD>
void SGDMTAFunctor<T_WEIGHT, T_GRAD>::operator()(cudaStream_t stream,
ChunkGroup<MTA_SGD_GROUP_SIZE> chunks,
const float lr) {
const int block_count = chunks.chunk_count;
const int thread_count = ChunkGroup<MTA_SGD_GROUP_SIZE>::thread_count_per_block;
SGDCompute<T_WEIGHT, T_GRAD><<<block_count, thread_count, 0, stream>>>(chunks, lr);
}
#define INSTANTIATE_SGD_FUNCTOR(T_WEIGHT, T_GRAD) \
template void SGDMTAFunctor<T_WEIGHT, T_GRAD>::operator()(cudaStream_t stream, \
ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, \
const float lr); \
template __global__ void SGDCompute<T_WEIGHT, T_GRAD>(ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, \
const float lr);
INSTANTIATE_SGD_FUNCTOR(float, float)
#undef INSTANTIATE_SGD_FUNCTOR
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/multi_tensor/common.cuh"
namespace onnxruntime {
namespace rocm {
#define MTA_SGD_GROUP_SIZE 2
#define MTA_SGD_CHUNK_SIZE (2048 * 32)
template <typename T_WEIGHT, typename T_GRAD>
struct SGDMTAFunctor {
void operator()(hipStream_t stream, ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, const float lr);
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/multi_tensor/common.cuh"
namespace onnxruntime {
namespace cuda {
#define MTA_SGD_GROUP_SIZE 2
#define MTA_SGD_CHUNK_SIZE 2048 * 32
template <typename T_WEIGHT, typename T_GRAD>
struct SGDMTAFunctor {
void operator()(cudaStream_t stream, ChunkGroup<MTA_SGD_GROUP_SIZE> chunks, const float lr);
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T1, typename T2>
class MatMulInteger final : public RocmKernel {
using Base = RocmKernel;
public:
MatMulInteger(const OpKernelInfo& info) : RocmKernel(info) {
has_a_zero_point_ = false;
has_b_zero_point_ = false;
if (info.GetInputCount() > 2) {
has_a_zero_point_ = true;
}
if (info.GetInputCount() > 3) {
has_b_zero_point_ = true;
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool has_a_zero_point_;
bool has_b_zero_point_;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2>
class MatMulInteger final : public CudaKernel {
using Base = CudaKernel;
public:
MatMulInteger(const OpKernelInfo& info) : CudaKernel(info) {
has_a_zero_point_ = false;
has_b_zero_point_ = false;
if (info.GetInputCount() > 2) {
has_a_zero_point_ = true;
}
if (info.GetInputCount() > 3) {
has_b_zero_point_ = true;
}
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
bool has_a_zero_point_;
bool has_b_zero_point_;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <string>
#include "core/common/common.h"
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class FakeQuant final : public RocmKernel {
public:
FakeQuant(const OpKernelInfo& info) : RocmKernel(info) {
info.GetAttrOrDefault("quant_min", &quant_min_, static_cast<decltype(quant_min_)>(0));
info.GetAttrOrDefault("quant_max", &quant_max_, static_cast<decltype(quant_max_)>(255));
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t quant_min_;
int64_t quant_max_;
};
template <typename T>
class FakeQuantGrad final : public RocmKernel {
public:
FakeQuantGrad(const OpKernelInfo& info) : RocmKernel(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <string>
#include "core/common/common.h"
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class FakeQuant final : public CudaKernel {
public:
FakeQuant(const OpKernelInfo& info) : CudaKernel(info) {
info.GetAttrOrDefault("quant_min", &quant_min_, static_cast<decltype(quant_min_)>(0));
info.GetAttrOrDefault("quant_max", &quant_max_, static_cast<decltype(quant_max_)>(255));
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
int64_t quant_min_;
int64_t quant_max_;
};
template <typename T>
class FakeQuantGrad final : public CudaKernel {
public:
FakeQuantGrad(const OpKernelInfo& info) : CudaKernel(info) {
}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <hip/hip_runtime.h>
namespace onnxruntime {
namespace rocm {
template <typename T>
void FakeQuantPerTensor(hipStream_t stream, const int64_t num_elements, const T* input_data, const T quant_scale,
const T quant_zero_point, const int64_t quant_min, const int64_t quant_max,
T* fake_quantized_data, bool* quantization_mask_data);
template <typename T>
void FakeQuantGradImpl(hipStream_t stream, const int64_t num_elements, const T* dY_data,
const bool* gradient_mask_data, T* dX_data);
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <stdint.h>
#include <cuda_runtime.h>
namespace onnxruntime {
namespace cuda {
template <typename T>
void FakeQuantPerTensor(cudaStream_t stream, const int64_t num_elements, const T* input_data, const T quant_scale,
const T quant_zero_point, const int64_t quant_min, const int64_t quant_max,
T* fake_quantized_data, bool* quantization_mask_data);
template <typename T>
void FakeQuantGradImpl(cudaStream_t stream, const int64_t num_elements, const T* dY_data,
const bool* gradient_mask_data, T* dX_data);
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/rocm/reduction/all.h"
#include "orttraining/training_ops/rocm/reduction/all_impl.h"
namespace onnxruntime {
namespace rocm {
#define REGISTER_ALL_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
All, \
kMSDomain, \
1, \
T, \
kRocmExecutionProvider, \
(*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
All<T>);
template <typename T>
Status All<T>::ComputeInternal(OpKernelContext* ctx) const {
const Tensor& input = *ctx->Input<Tensor>(0);
Tensor& output = *ctx->Output(0, {1});
const auto size = input.Shape().Size();
ORT_ENFORCE(size <= std::numeric_limits<int>::max(), "Number of reduced elements (",
size, ") exceeds the max allowed value (", std::numeric_limits<int>::max(), ").");
LaunchAllKernel(
Stream(ctx),
input.Data<T>(),
static_cast<int>(size),
output.MutableData<bool>());
return Status::OK();
}
REGISTER_ALL_KERNEL_TYPED(bool)
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "orttraining/training_ops/cuda/reduction/all.h"
#include "orttraining/training_ops/cuda/reduction/all_impl.h"
namespace onnxruntime {
namespace cuda {
#define REGISTER_ALL_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX( \
All, \
kMSDomain, \
1, \
T, \
kCudaExecutionProvider, \
(*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
All<T>);
template <typename T>
Status All<T>::ComputeInternal(OpKernelContext* ctx) const {
const Tensor& input = *ctx->Input<Tensor>(0);
Tensor& output = *ctx->Output(0, {1});
const auto size = input.Shape().Size();
ORT_ENFORCE(size <= std::numeric_limits<int>::max(), "Number of reduced elements (",
size, ") exceeds the max allowed value (", std::numeric_limits<int>::max(), ").");
LaunchAllKernel(
Stream(ctx),
input.Data<T>(),
static_cast<int>(size),
output.MutableData<bool>());
return Status::OK();
}
REGISTER_ALL_KERNEL_TYPED(bool)
} // namespace cuda
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/rocm/rocm_kernel.h"
namespace onnxruntime {
namespace rocm {
template <typename T>
class All final : public RocmKernel {
public:
All(const OpKernelInfo& info) : RocmKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace rocm
} // namespace onnxruntime
### |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
class All final : public CudaKernel {
public:
All(const OpKernelInfo& info) : CudaKernel(info) {}
Status ComputeInternal(OpKernelContext* context) const override;
};
} // namespace cuda
} // namespace onnxruntime
### |