generic_scaled_masked_softmax.h
/* coding=utf-8
 * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <assert.h>
#include <stdint.h>
#include <cfloat>
#include <cmath>   // std::exp
#include <limits>
#include <cuda_fp16.h>
#include <c10/macros/Macros.h>
#include <ATen/cuda/CUDAContext.h> // at::cuda::getCurrentCUDAStream()
namespace {

template <typename T>
struct Add {
    __device__ __forceinline__ T operator()(T a, T b) const {
        return a + b;
    }
};

template <typename T>
struct Max {
    __device__ __forceinline__ T operator()(T a, T b) const {
        return a < b ? b : a;
    }
};

template <typename T>
__device__ __forceinline__ T WARP_SHFL_DOWN_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
{
#if CUDA_VERSION >= 9000
    return __shfl_down_sync(mask, value, laneMask, width);
#else
    return __shfl_down(value, laneMask, width);
#endif
}

template <typename acc_t, int WARP_SIZE, template<typename> class ReduceOp>
__device__ __forceinline__ acc_t warp_reduce_new(acc_t val) {
    ReduceOp<acc_t> r;
#pragma unroll
    for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
        val = r(val, WARP_SHFL_DOWN_NATIVE(val, offset, WARP_SIZE));
    }
    return val;
}
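
// Both kernels below reduce a full attention row with the same two-stage pattern:
// each warp first reduces its own elements with warp_reduce_new (shuffle based) and
// lane 0 stores the per-warp partial result in shared memory; the array of partial
// results is then folded in further passes, shrinking by a factor of C10_WARP_SIZE
// each time, until a single value is left in shared[0].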
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
__global__ void scaled_masked_softmax_warp_backward_new(
    output_t *gradInput,      // [batches, attn_heads, q_len, k_len]
    input_t *grad,
    const input_t *output,    // [batches, attn_heads, q_len, k_len]
    acc_t scale,
    int element_count)
{
    int threads_per_block = blockDim.x;
    // the first element_count*2 shared entries cache the forward output and output*grad;
    // the remaining entries (one acc_t per warp-sized chunk of the row) hold partial sums
    extern __shared__ acc_t shared_data[];
    input_t *local_data = (input_t *)shared_data;
    input_t *output_data = &local_data[element_count];
    // at most 128 partial sums are needed: 4096 elements / 32 lanes per warp = 128
    acc_t *shared = (acc_t *)(&(local_data[element_count * 2]));
    int num_reductions = (element_count - 1) / threads_per_block + 1;
    int offset = blockIdx.x * element_count;
    int local_idx = threadIdx.x;
    int lane = threadIdx.x % C10_WARP_SIZE;
    int wid = threadIdx.x / C10_WARP_SIZE;
    int warps_per_thread_block = threads_per_block / C10_WARP_SIZE;
    // load the data into shared memory
    acc_t val = 0.0;
    for (int i = 0; i < num_reductions; i++) {
        if (i * threads_per_block + local_idx < element_count) {
            val = output[offset + i * threads_per_block + local_idx];
            output_data[i * threads_per_block + local_idx] = val;
            local_data[i * threads_per_block + local_idx] = val * grad[offset + i * threads_per_block + local_idx];
        }
        __syncthreads();
    }
    // find the sum of output * grad
    for (int i = local_idx; i < (element_count - 1) / C10_WARP_SIZE + 1; i += threads_per_block) {
        shared[i] = 0.0;
    }
    __syncthreads();
#pragma unroll
    for (int i = 0; i < num_reductions; i++) {
        if (i * threads_per_block + local_idx < element_count) {
            val = local_data[i * threads_per_block + local_idx];
        } else {
            val = 0.0;
        }
        __syncthreads();
        val = warp_reduce_new<acc_t, C10_WARP_SIZE, Add>(val);
        if (lane == 0 && wid + warps_per_thread_block * i < (element_count - 1) / C10_WARP_SIZE + 1) {
            shared[wid + warps_per_thread_block * i] = val;
        }
        __syncthreads();
    }
    // final shared reduction
    int shared_mem_len = (element_count - 1) / C10_WARP_SIZE + 1;
    int num_warps = (shared_mem_len - 1) / C10_WARP_SIZE + 1;
    while (shared_mem_len > 1) {
#pragma unroll
        for (int i = 0; i < num_reductions; i++) {
            if (i * threads_per_block + local_idx < shared_mem_len) {
                val = shared[i * threads_per_block + local_idx];
            } else {
                val = 0.0;
            }
            __syncthreads();
            val = warp_reduce_new<acc_t, C10_WARP_SIZE, Add>(val);
            // bound the write so it stays inside the allocated partial-sum buffer
            if (lane == 0 && wid + warps_per_thread_block * i < shared_mem_len) {
                shared[wid + warps_per_thread_block * i] = val;
            }
            __syncthreads();
        }
        shared_mem_len = num_warps;
        num_warps = (shared_mem_len - 1) / C10_WARP_SIZE + 1;
    }
    val = shared[0];
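    // With y the forward softmax output and dy the incoming gradient, the softmax backward pass is
    //   dx_i = scale * y_i * (dy_i - sum_j y_j * dy_j).
    // local_data holds y * dy, output_data holds y, and val now holds the row sum of y * dy,
    // so the loop below writes scale * (y_i * dy_i - y_i * sum_j y_j * dy_j).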
#pragma unroll
    for (int i = local_idx; i < element_count; i += threads_per_block) {
        gradInput[offset + i] = (output_t)(scale * (local_data[i] - output_data[i] * val));
    }
}
} // end of anonymous namespace
template <typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_masked_softmax_backward_new(
    output_t *grad_input,
    input_t *grad,
    const input_t *output,
    const acc_t scale,
    int query_seq_len,
    int key_seq_len,
    int batches,
    int attn_heads)
{
    if (key_seq_len == 0) {
        return;
    } else {
        int batch_count = batches * attn_heads * query_seq_len;
        // use 128 threads per block to maximize gpu utilization
        constexpr int threads_per_block = 128;
        int num_warps = (key_seq_len - 1) / C10_WARP_SIZE + 1;
        dim3 blocks(batch_count, 1, 1);
        dim3 threads(threads_per_block, 1, 1);
        // dynamic shared memory: two input_t rows (output and output*grad caches) plus one acc_t
        // partial sum per warp-sized chunk of the row;
        // the log2_elements template argument (12) is not used by the kernel body
        scaled_masked_softmax_warp_backward_new<input_t, output_t, acc_t, 12>
            <<<blocks, threads, sizeof(input_t) * key_seq_len * 2 + sizeof(acc_t) * num_warps, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, key_seq_len);
    }
}
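
// Illustrative only: a minimal sketch of how the backward dispatcher might be called from a
// host-side autograd wrapper. The tensor names (grad_output, softmax_out) and the wrapper are
// hypothetical and not part of this header; softmax_out is the tensor produced by the forward
// pass, and all tensors are contiguous with layout [batches, attn_heads, q_len, k_len].
//
//   // torch::Tensor grad_output, softmax_out;  // both [b, h, q, k], half precision
//   // float scale = 1.0f;                      // example value; must match the forward scale
//   // auto grad_input = torch::empty_like(grad_output);
//   // dispatch_scaled_masked_softmax_backward_new<at::Half, at::Half, float>(
//   //     grad_input.data_ptr<at::Half>(),
//   //     grad_output.data_ptr<at::Half>(),
//   //     softmax_out.data_ptr<at::Half>(),
//   //     scale,
//   //     grad_output.size(2),   // query_seq_len
//   //     grad_output.size(3),   // key_seq_len
//   //     grad_output.size(0),   // batches
//   //     grad_output.size(1));  // attn_heads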
/*
 * Extended softmax (adapted from native ATen PyTorch) with the following additional features:
 * 1) input scaling
 * 2) explicit masking
 */
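// Concretely, for each row of length key_len the kernel below computes
//   dst = softmax(scale * src), with masked positions (mask == 1) set to -10000
// before the softmax; if every position in a row is masked, the output row is all zeros.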
template <typename input_t, typename output_t, typename acc_t>
__global__ void scaled_masked_softmax_warp_forward_new(
    output_t *dst,
    const input_t *src,
    const uint8_t *mask,
    const acc_t scale,
    int query_len,
    int attn_heads,
    int element_count, // key_len
    int pad_batches)   // mask batch size
{
    // threads_per_block is expected to be at least 128 (the dispatcher below launches exactly 128)
    int threads_per_block = blockDim.x;
    // the first element_count shared entries cache the row; the remaining entries
    // (one acc_t per warp-sized chunk of the row) hold partial reductions
    extern __shared__ acc_t local_data[];
    // at most 128 partial results are needed: 4096 elements / 32 lanes per warp = 128
    acc_t *shared = &(local_data[element_count]);
    // number of threads_per_block-sized passes over the row
    int num_reductions = (element_count - 1) / threads_per_block + 1;
    int offset = blockIdx.x * element_count;
    int mask_offset;
    int query_id = blockIdx.x % query_len;
    if (pad_batches == 1) {
        // broadcast the single mask across all batches
        mask_offset = query_id * element_count;
    } else {
        int mask_batch_id = blockIdx.x / attn_heads / query_len;
        mask_offset = (mask_batch_id * query_len + query_id) * element_count;
    }
    int local_idx = threadIdx.x;
    int lane = threadIdx.x % C10_WARP_SIZE;
    int wid = threadIdx.x / C10_WARP_SIZE;
    int warps_per_thread_block = threads_per_block / C10_WARP_SIZE;
    // load the data into shared memory, applying scale and mask
    for (int i = local_idx; i < element_count; i += threads_per_block) {
        // TODO: use the copy-vector method
        if (mask[mask_offset + i] == 1) {
            local_data[i] = -10000.0;
        } else {
            local_data[i] = src[offset + i] * scale;
        }
    }
    // first find the max value
    for (int i = local_idx; i < (element_count - 1) / C10_WARP_SIZE + 1; i += threads_per_block) {
        shared[i] = -10000.0;
    }
    __syncthreads();
    acc_t val = -10000.0;
#pragma unroll
    for (int i = 0; i < num_reductions; i++) {
        if (i * threads_per_block + local_idx < element_count) {
            val = local_data[i * threads_per_block + local_idx];
        } else {
            val = -10000.0;
        }
        __syncthreads();
        val = warp_reduce_new<acc_t, C10_WARP_SIZE, Max>(val);
        if (lane == 0 && wid + warps_per_thread_block * i < (element_count - 1) / C10_WARP_SIZE + 1) {
            shared[wid + warps_per_thread_block * i] = val;
        }
        __syncthreads();
    }
    // final shared reduction
    int shared_mem_len = (element_count - 1) / C10_WARP_SIZE + 1;
    int num_warps = (shared_mem_len - 1) / C10_WARP_SIZE + 1;
    while (shared_mem_len > 1) {
#pragma unroll
        for (int i = 0; i < num_reductions; i++) {
            if (i * threads_per_block + local_idx < shared_mem_len) {
                val = shared[i * threads_per_block + local_idx];
            } else {
                val = -10000.0;
            }
            __syncthreads();
            val = warp_reduce_new<acc_t, C10_WARP_SIZE, Max>(val);
            // bound the write so it stays inside the allocated partial-results buffer
            if (lane == 0 && wid + warps_per_thread_block * i < shared_mem_len) {
                shared[wid + warps_per_thread_block * i] = val;
            }
            __syncthreads();
        }
        shared_mem_len = num_warps;
        num_warps = (shared_mem_len - 1) / C10_WARP_SIZE + 1;
    }
    acc_t reduced_val = shared[0];
    if (reduced_val < -10000.0 + 0.1) {
        // if everything is masked, attend to nothing
#pragma unroll
        for (int i = local_idx; i < element_count; i += threads_per_block) {
            dst[offset + i] = 0.0;
        }
        return;
    }
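    // numerically stable softmax: subtract the row max before exponentiating so the largest
    // exponent is exp(0) = 1, then normalize by the sum of the exponentials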
    // update the values
#pragma unroll
    for (int i = local_idx; i < element_count; i += threads_per_block) {
        local_data[i] = std::exp(local_data[i] - reduced_val);
    }
    // find the sum
    for (int i = local_idx; i < (element_count - 1) / C10_WARP_SIZE + 1; i += threads_per_block) {
        shared[i] = 0.0;
    }
    __syncthreads();
#pragma unroll
    for (int i = 0; i < num_reductions; i++) {
        if (i * threads_per_block + local_idx < element_count) {
            val = local_data[i * threads_per_block + local_idx];
        } else {
            val = 0.0;
        }
        __syncthreads();
        val = warp_reduce_new<acc_t, C10_WARP_SIZE, Add>(val);
        if (lane == 0 && wid + warps_per_thread_block * i < (element_count - 1) / C10_WARP_SIZE + 1) {
            shared[wid + warps_per_thread_block * i] = val;
        }
        __syncthreads();
    }
    shared_mem_len = (element_count - 1) / C10_WARP_SIZE + 1;
    num_warps = (shared_mem_len - 1) / C10_WARP_SIZE + 1;
    while (shared_mem_len > 1) {
#pragma unroll
        for (int i = 0; i < num_reductions; i++) {
            if (i * threads_per_block + local_idx < shared_mem_len) {
                val = shared[i * threads_per_block + local_idx];
            } else {
                val = 0.0;
            }
            __syncthreads();
            val = warp_reduce_new<acc_t, C10_WARP_SIZE, Add>(val);
            // bound the write so it stays inside the allocated partial-sum buffer
            if (lane == 0 && wid + warps_per_thread_block * i < shared_mem_len) {
                shared[wid + warps_per_thread_block * i] = val;
            }
            __syncthreads();
        }
        shared_mem_len = num_warps;
        num_warps = (shared_mem_len - 1) / C10_WARP_SIZE + 1;
    }
    reduced_val = shared[0];
#pragma unroll
    for (int i = local_idx; i < element_count; i += threads_per_block) {
        dst[offset + i] = local_data[i] / reduced_val;
    }
}
template <typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_masked_softmax_forward_new(
    output_t *dst,
    const input_t *src,
    const uint8_t *mask,
    const input_t scale,
    int query_seq_len,
    int key_seq_len,
    int batches,
    int attn_heads,
    int pad_batches)
{
    if (key_seq_len == 0) {
        return;
    } else {
        int batch_count = batches * attn_heads * query_seq_len;
        // use 128 threads per block to maximize gpu utilization
        constexpr int threads_per_block = 128;
        // calculate the needed shared memory: one acc_t per row element plus one per warp-sized chunk
        int num_warps = (key_seq_len - 1) / C10_WARP_SIZE + 1;
        dim3 blocks(batch_count, 1, 1);
        dim3 threads(threads_per_block, 1, 1);
        scaled_masked_softmax_warp_forward_new<input_t, output_t, acc_t>
            <<<blocks, threads, sizeof(acc_t) * (key_seq_len + num_warps), at::cuda::getCurrentCUDAStream()>>>(
                dst, src, mask, scale, query_seq_len, attn_heads, key_seq_len, pad_batches);
    }
}
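
// Illustrative only: a minimal sketch of how the forward dispatcher might be called from a
// host-side extension function. The names (attn_scores, pad_mask, out) and the wrapper are
// hypothetical and not part of this header; the mask is assumed to be uint8 with 1 marking
// masked positions, contiguous with shape [pad_batches, 1, q_len, k_len], where pad_batches is
// either 1 (broadcast to every batch) or equal to batches.
//
//   // torch::Tensor attn_scores;  // [batches, attn_heads, q_len, k_len], half precision
//   // torch::Tensor pad_mask;     // [pad_batches, 1, q_len, k_len], torch::kUInt8
//   // float scale = 1.0f;         // example value
//   // auto out = torch::empty_like(attn_scores);
//   // dispatch_scaled_masked_softmax_forward_new<at::Half, at::Half, float>(
//   //     out.data_ptr<at::Half>(),
//   //     attn_scores.data_ptr<at::Half>(),
//   //     pad_mask.data_ptr<uint8_t>(),
//   //     scale,
//   //     attn_scores.size(2),   // query_seq_len
//   //     attn_scores.size(3),   // key_seq_len
//   //     attn_scores.size(0),   // batches
//   //     attn_scores.size(1),   // attn_heads
//   //     pad_mask.size(0));     // pad_batches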