scaled_softmax.cpp

/* coding=utf-8
 * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cuda_fp16.h>
#include <torch/extension.h>
#include <vector>

namespace multihead_attn {
namespace fused_softmax {
namespace scaled_softmax {
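
// Entry points of the fused CUDA kernels. Only the declarations appear here;
// the definitions are assumed to be compiled from a separate .cu translation
// unit that is linked into the same extension.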
torch::Tensor fwd_cuda(
    torch::Tensor const& input,
    float scale_factor);

torch::Tensor bwd_cuda(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor);
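
// Host-side forward wrapper: checks that the attention scores are a 4D
// fp16/bf16 tensor before dispatching to the CUDA kernel.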
torch::Tensor fwd(
    torch::Tensor const& input,
    float scale_factor) {
  TORCH_CHECK(input.dim() == 4, "expected 4D tensor");
  TORCH_CHECK((input.scalar_type() == at::ScalarType::Half) ||
                  (input.scalar_type() == at::ScalarType::BFloat16),
              "Only fp16 and bf16 are supported");

  return fwd_cuda(input, scale_factor);
}
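
// Host-side backward wrapper. Assuming the forward pass computes
// y = softmax(scale_factor * x) along the last dimension, the gradient the
// kernel is expected to return is
//   dx = scale_factor * y * (dy - sum(dy * y, last dim, keepdim)).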
torch::Tensor bwd(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor) {
  TORCH_CHECK(output_grads.dim() == 4, "expected 4D tensor");
  TORCH_CHECK(softmax_results.dim() == 4, "expected 4D tensor");
  TORCH_CHECK((output_grads.scalar_type() == at::ScalarType::Half) ||
                  (output_grads.scalar_type() == at::ScalarType::BFloat16),
              "Only fp16 and bf16 are supported");
  TORCH_CHECK((softmax_results.scalar_type() == at::ScalarType::Half) ||
                  (softmax_results.scalar_type() == at::ScalarType::BFloat16),
              "Only fp16 and bf16 are supported");

  return bwd_cuda(output_grads, softmax_results, scale_factor);
}
} // end namespace scaled_softmax
} // end namespace fused_softmax
} // end namespace multihead_attn
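
// Python bindings: the wrappers above are exposed as `forward` and
// `backward` on the compiled extension module.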
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward",
        &multihead_attn::fused_softmax::scaled_softmax::fwd,
        "Self Multihead Attention scaled softmax -- Forward.");
  m.def("backward",
        &multihead_attn::fused_softmax::scaled_softmax::bwd,
        "Self Multihead Attention scaled softmax -- Backward.");
}
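
// Non-fused reference (a sketch under the assumption that the kernel applies
// softmax over the last dimension of the scaled scores; not part of this
// extension):
//
//   auto probs = torch::softmax(scale_factor * input, /*dim=*/-1);
//
// The fused path is assumed to produce the same values for fp16/bf16 inputs
// while avoiding the intermediate scaled tensor.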