generic_scaled_masked_softmax.cpp

/* coding=utf-8
 * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cuda_fp16.h>
#include <torch/extension.h>
#include <vector>

namespace multihead_attn
{
namespace fused_softmax
{
namespace generic_scaled_masked_softmax
{

// CUDA kernel entry points; declared here, defined in the companion .cu
// translation unit that gets compiled alongside this file.
torch::Tensor fwd_cuda(
    torch::Tensor const &input,
    torch::Tensor const &mask,
    float scale_factor);

torch::Tensor bwd_cuda(
    torch::Tensor const &output_grads,
    torch::Tensor const &softmax_results,
    float scale_factor);
// Forward: validate shapes and dtypes, then dispatch to the CUDA kernel.
torch::Tensor fwd(
    torch::Tensor const &input,
    torch::Tensor const &mask,
    float scale_factor)
{
  TORCH_CHECK(input.dim() == 4, "expected 4D tensor");
  TORCH_CHECK((input.scalar_type() == at::ScalarType::Half) ||
                  (input.scalar_type() == at::ScalarType::BFloat16),
              "Only fp16 and bf16 are supported");
  TORCH_CHECK(mask.dim() == 4, "expected 4D tensor");

  return fwd_cuda(input, mask, scale_factor);
}
// Backward: validate the incoming gradients and the saved softmax output,
// then dispatch to the CUDA kernel.
torch::Tensor bwd(
    torch::Tensor const &output_grads,
    torch::Tensor const &softmax_results,
    float scale_factor)
{
  TORCH_CHECK(output_grads.dim() == 4, "expected 4D tensor");
  TORCH_CHECK(softmax_results.dim() == 4, "expected 4D tensor");
  TORCH_CHECK((output_grads.scalar_type() == at::ScalarType::Half) ||
                  (output_grads.scalar_type() == at::ScalarType::BFloat16),
              "Only fp16 and bf16 are supported");
  TORCH_CHECK((softmax_results.scalar_type() == at::ScalarType::Half) ||
                  (softmax_results.scalar_type() == at::ScalarType::BFloat16),
              "Only fp16 and bf16 are supported");

  return bwd_cuda(output_grads, softmax_results, scale_factor);
}
} // end namespace generic_scaled_masked_softmax
} // end namespace fused_softmax
} // end namespace multihead_attn
// Python bindings: the wrappers are exposed on the extension module as
// forward() and backward().
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward",
        &multihead_attn::fused_softmax::generic_scaled_masked_softmax::fwd,
        "Self Multihead Attention scaled, time masked softmax -- Forward.");
  m.def("backward",
        &multihead_attn::fused_softmax::generic_scaled_masked_softmax::bwd,
        "Self Multihead Attention scaled, time masked softmax -- Backward.");
}
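
For context, here is a minimal sketch of how an extension like this is typically compiled and called from Python using torch.utils.cpp_extension.load. The companion .cu filename, the tensor shapes, and the mask dtype are assumptions for illustration; only the forward/backward signatures and the 4D/fp16-bf16 checks come from the file above.

# Hypothetical usage sketch -- the .cu filename, shapes, and mask dtype are
# assumptions; the forward/backward signatures come from the bindings above.
import torch
from torch.utils.cpp_extension import load

ext = load(
    name="generic_scaled_masked_softmax",
    sources=[
        "generic_scaled_masked_softmax.cpp",
        "generic_scaled_masked_softmax_cuda.cu",  # assumed companion kernel file
    ],
)

b, h, sq, sk = 2, 8, 128, 128
logits = torch.randn(b, h, sq, sk, dtype=torch.float16, device="cuda")
mask = torch.zeros(b, 1, sq, sk, dtype=torch.bool, device="cuda")  # 4D, per TORCH_CHECK

scale = 1.0 / (sk ** 0.5)
probs = ext.forward(logits, mask, scale)                      # fused scaled, masked softmax
grad_in = ext.backward(torch.ones_like(probs), probs, scale)  # grad w.r.t. the logits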