// scaled_masked_softmax_cuda.cu
  1. /* coding=utf-8
  2. * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <ATen/ATen.h>
  17. #include <cuda.h>
  18. #include <cuda_runtime.h>
  19. #include <cuda_fp16.h>
  20. #include <cuda_profiler_api.h>
  21. #include <ATen/cuda/CUDAContext.h>
  22. #include <torch/extension.h>
  23. #include "scaled_masked_softmax.h"
  24. #include "type_shim.h"
  25. namespace multihead_attn {
  26. namespace fused_softmax {
  27. namespace scaled_masked_softmax {
  28. int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads){
  29. return get_batch_per_block(query_seq_len, key_seq_len, batches, attn_heads);
  30. }
  31. torch::Tensor fwd_cuda(
  32. torch::Tensor const& input,
  33. torch::Tensor const& mask,
  34. float scale_factor)
  35. {
  36. // input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len]
  37. const int batches = input.size(0);
  38. const int pad_batches = mask.size(0);
  39. const int attn_heads = input.size(1);
  40. const int query_seq_len = input.size(2);
  41. const int key_seq_len = input.size(3);
  42. TORCH_INTERNAL_ASSERT(key_seq_len <= 16384);
  43. TORCH_INTERNAL_ASSERT(query_seq_len > 1);
  44. TORCH_INTERNAL_ASSERT(pad_batches == 1 || pad_batches == batches);
  45. TORCH_INTERNAL_ASSERT(mask.size(1) == 1);
  46. TORCH_INTERNAL_ASSERT(mask.size(2) == query_seq_len);
  47. TORCH_INTERNAL_ASSERT(mask.size(3) == key_seq_len);
  48. // Output
  49. auto act_options = input.options().requires_grad(false);
  50. torch::Tensor softmax_results =
  51. torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options);
  52. // Softmax Intermediate Result Ptr
  53. void* input_ptr = static_cast<void*>(input.data_ptr());
  54. void* mask_ptr = static_cast<void*>(mask.data_ptr());
  55. void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
  56. DISPATCH_HALF_AND_BFLOAT(
  57. input.scalar_type(),
  58. "dispatch_scaled_masked_softmax_forward",
  59. dispatch_scaled_masked_softmax_forward<scalar_t, scalar_t, float>(
  60. reinterpret_cast<scalar_t*>(softmax_results_ptr),
  61. reinterpret_cast<const scalar_t*>(input_ptr),
  62. reinterpret_cast<const uint8_t*>(mask_ptr),
  63. scale_factor,
  64. query_seq_len,
  65. key_seq_len,
  66. batches,
  67. attn_heads,
  68. pad_batches
  69. );
  70. );
  71. return softmax_results;
  72. }
  73. torch::Tensor bwd_cuda(
  74. torch::Tensor const& output_grads_,
  75. torch::Tensor const& softmax_results_,
  76. float scale_factor) {
  77. auto output_grads = output_grads_.contiguous();
  78. auto softmax_results = softmax_results_.contiguous();
  79. //output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len]
  80. const int batches = output_grads.size(0);
  81. const int attn_heads = output_grads.size(1);
  82. const int query_seq_len = output_grads.size(2);
  83. const int key_seq_len = output_grads.size(3);
  84. auto act_options = output_grads.options().requires_grad(false);
  85. torch::Tensor input_grads =
  86. torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options);
  87. void* input_grads_ptr = static_cast<void*>(input_grads.data_ptr());
  88. void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr());
  89. //Softmax Grad
  90. DISPATCH_HALF_AND_BFLOAT(
  91. output_grads_.scalar_type(),
  92. "dispatch_scaled_masked_softmax_backward",
  93. dispatch_scaled_masked_softmax_backward<scalar_t, scalar_t, float>(
  94. reinterpret_cast<scalar_t*>(input_grads_ptr),
  95. reinterpret_cast<scalar_t*>(output_grads_ptr),
  96. reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()),
  97. scale_factor,
  98. query_seq_len,
  99. key_seq_len,
  100. batches,
  101. attn_heads
  102. );
  103. );
  104. return input_grads;
  105. }
  106. }
  107. }
  108. }