/******************************************************************************
 * Copyright (c) 2024, PAI, Alibaba Cloud.
 ******************************************************************************/

#pragma once

#include "flash_fwd_launch_template.h"
#include "flash_fwd_sparse_kernel.h"

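// The template flags below follow the upstream FlashAttention conventions
// (summarized here as a reader aid; semantics are assumed from that codebase):
//   Is_dropout     - attention dropout is enabled (p_dropout < 1)
//   Is_causal      - causal (lower-triangular) masking
//   Is_local       - local / sliding-window attention
//   Has_alibi      - ALiBi positional biases are added to the scores
//   Is_even_MN     - seqlen_q / seqlen_k are multiples of the block sizes
//   Is_even_K      - the actual head dim equals the padded kHeadDim
//   Is_softcap     - tanh soft-capping is applied to the attention logits
//   Return_softmax - the (dropout-encoded) attention matrix is written out
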
DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_sparse_kernel, bool Is_dropout, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Return_softmax) {
    #if defined(ARCH_SUPPORTS_FLASH)
        static_assert(!(Is_causal && Is_local));  // Causal and local (sliding-window) attention are mutually exclusive.
        flash::compute_sparse_attn<Kernel_traits, Is_dropout, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Is_softcap, Return_softmax>(params);
    #else
        FLASH_UNSUPPORTED_ARCH
    #endif
}

template<typename Kernel_traits, bool Is_dropout, bool Is_causal>
void run_flash_sparse_fwd(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr size_t smem_size = Kernel_traits::kSmemSize;
    // printf("smem_size = %zu\n", smem_size);

    // Work-around for gcc 7. It doesn't like nested BOOL_SWITCH
    // (see the sketch of the macro after this function).
    // https://github.com/kokkos/kokkos-kernels/issues/349
    // https://github.com/HazyResearch/flash-attention/issues/21

    const int num_m_block = (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM;
    dim3 grid(num_m_block, params.b, params.h);
    const bool is_even_K = params.d == Kernel_traits::kHeadDim;
    const bool return_softmax = params.p_ptr != nullptr;
    EVENK_SWITCH(is_even_K, IsEvenKConst, [&] {
        BOOL_SWITCH(return_softmax, ReturnSoftmaxConst, [&] {
            ALIBI_SWITCH(params.alibi_slopes_ptr != nullptr, Has_alibi, [&] {
                SOFTCAP_SWITCH(params.softcap > 0.0, Is_softcap, [&] {
                    constexpr bool IsEvenMNConst = false;
                    constexpr bool Is_local = false;
                    // Only return softmax when dropout is enabled, to reduce compilation time.
                    // If not IsEvenKConst, also set IsEvenMNConst to false to reduce the number of templates.
                    // If return_softmax, set IsEvenMNConst to false to reduce the number of templates.
                    // If head dim > 128, set IsEvenMNConst to false to reduce the number of templates.
                    // If Is_local, set Is_causal to false.
                    auto kernel = &flash_fwd_sparse_kernel<Kernel_traits, Is_dropout && !Is_softcap, Is_causal, Is_local && !Is_causal, Has_alibi, IsEvenMNConst && IsEvenKConst && !Is_local && !ReturnSoftmaxConst && Kernel_traits::kHeadDim <= 128, IsEvenKConst, Is_softcap, ReturnSoftmaxConst && Is_dropout && !Is_softcap>;
                    // printf("IsEvenMNConst = %d, IsEvenKConst = %d, Is_local = %d, Is_causal = %d, ReturnSoftmaxConst = %d, Is_dropout = %d\n", int(IsEvenMNConst), int(IsEvenKConst), int(Is_local), int(Is_causal), int(ReturnSoftmaxConst), int(Is_dropout));
                    // Kernels using more than 48 KB of dynamic shared memory must opt in explicitly.
                    if (smem_size >= 48 * 1024) {
                        C10_CUDA_CHECK(cudaFuncSetAttribute(
                            kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
                    }
                    // Occupancy debugging aid:
                    // int ctas_per_sm;
                    // cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor(
                    //     &ctas_per_sm, kernel, Kernel_traits::kNThreads, smem_size);
                    // printf("smem_size = %d, CTAs per SM = %d\n", int(smem_size), ctas_per_sm);
                    kernel<<<grid, Kernel_traits::kNThreads, smem_size, stream>>>(params);
                    C10_CUDA_KERNEL_LAUNCH_CHECK();
                });
            });
        });
    });
}
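
// For reference, the *_SWITCH macros above turn a runtime bool into a
// compile-time template parameter by instantiating both branches. A minimal
// sketch of the pattern, assuming the BOOL_SWITCH definition from upstream
// FlashAttention's static_switch.h (kept as a comment so that header remains
// the single definition):
//
//   #define BOOL_SWITCH(COND, CONST_NAME, ...)      \
//     [&] {                                         \
//       if (COND) {                                 \
//         constexpr static bool CONST_NAME = true;  \
//         __VA_ARGS__();                            \
//       } else {                                    \
//         constexpr static bool CONST_NAME = false; \
//         __VA_ARGS__();                            \
//       }                                           \
//     }()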
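// Each instantiation below uses Flash_fwd_kernel_traits<Headdim, kBlockM=64,
// kBlockN=64, kNWarps=4, Is_Q_in_regs=false, Share_Q_K_smem=false, T>. The
// parameter names are assumptions based on the upstream Flash_fwd_kernel_traits
// signature, noted so the repeated <Headdim, 64, 64, 4, false, false, T>
// argument lists read at a glance.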
template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim32(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 32;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}

template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim64(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 64;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}

template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim96(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 96;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}

template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim128(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 128;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}

template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim160(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 160;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}

template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim192(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 192;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}

template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim224(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 224;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}

template<typename T, bool Is_causal>
void run_mha_fwd_sparse_hdim256(Flash_fwd_params &params, cudaStream_t stream) {
    constexpr static int Headdim = 256;
    DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
        run_flash_sparse_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream);
    });
}
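
// A minimal usage sketch (hypothetical; run_mha_fwd_sparse_dispatch is not
// part of this file): choosing a per-head-dim instantiation above from the
// runtime head size. Upstream FlashAttention does this with a HEADDIM_SWITCH
// macro; a plain if/else chain is shown here for clarity. Rounding params.d
// up to the next compiled Headdim is safe because is_even_K handles padding.
template<typename T, bool Is_causal>
void run_mha_fwd_sparse_dispatch(Flash_fwd_params &params, cudaStream_t stream) {
    if (params.d <= 32) { run_mha_fwd_sparse_hdim32<T, Is_causal>(params, stream); }
    else if (params.d <= 64) { run_mha_fwd_sparse_hdim64<T, Is_causal>(params, stream); }
    else if (params.d <= 96) { run_mha_fwd_sparse_hdim96<T, Is_causal>(params, stream); }
    else if (params.d <= 128) { run_mha_fwd_sparse_hdim128<T, Is_causal>(params, stream); }
    else if (params.d <= 160) { run_mha_fwd_sparse_hdim160<T, Is_causal>(params, stream); }
    else if (params.d <= 192) { run_mha_fwd_sparse_hdim192<T, Is_causal>(params, stream); }
    else if (params.d <= 224) { run_mha_fwd_sparse_hdim224<T, Is_causal>(params, stream); }
    else if (params.d <= 256) { run_mha_fwd_sparse_hdim256<T, Is_causal>(params, stream); }
    else { TORCH_CHECK(false, "Unsupported head dim: ", params.d); }
}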