#$ gcc -std=c++17 -D__CUDA_ARCH_LIST__=750 -D__NV_LEGACY_LAUNCH -E -x c++ -D__CUDACC__ -D__NVCC__ -D__CUDACC_EXTENDED_LAMBDA__ -D__CUDACC_RELAXED_CONSTEXPR__ -O3 -I"cutlass/include" "-I/opt/cuda/bin/../targets/x86_64-linux/include" -U "__CUDA_NO_HALF_OPERATORS__" -U "__CUDA_NO_HALF_CONVERSIONS__" -U "__CUDA_NO_HALF2_OPERATORS__" -U "__CUDA_NO_BFLOAT16_CONVERSIONS__" -D__CUDACC_VER_MAJOR__=12 -D__CUDACC_VER_MINOR__=4 -D__CUDACC_VER_BUILD__=131 -D__CUDA_API_VER_MAJOR__=12 -D__CUDA_API_VER_MINOR__=4 -DCUDA_API_PER_THREAD_DEFAULT_STREAM=1 -D__NVCC_DIAG_PRAGMA_SUPPORT__=1 -include "cuda_runtime.h" -m64 "kernels/flash_fwd_hdim192_bf16_sm80.cu" -o "/home/ai/tmp/tmpxft_00005bb3_00000000-5_flash_fwd_hdim192_bf16_sm80.cpp4.ii"
#$ cudafe++ --c++17 --gnu_version=130201 --display_error_number --orig_src_file_name "kernels/flash_fwd_hdim160_bf16_sm80.cu" --orig_src_path_name "/mnt/Home/ai/.cargo/git/checkouts/candle-c6a149c3b35a488f/997a57c/candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu" --allow_managed --extended-lambda --relaxed_constexpr --m64 --parse_templates --gen_c_file_name "/home/ai/tmp/tmpxft_00005bae_00000000-6_flash_fwd_hdim160_bf16_sm80.cudafe1.cpp" --stub_file_name "tmpxft_00005bae_00000000-6_flash_fwd_hdim160_bf16_sm80.cudafe1.stub.c" --gen_module_id_file --module_id_file_name "/home/ai/tmp/tmpxft_00005bae_00000000-4_flash_fwd_hdim160_bf16_sm80.module_id" "/home/ai/tmp/tmpxft_00005bae_00000000-5_flash_fwd_hdim160_bf16_sm80.cpp4.ii"
# --error 0x1 --

thread '<unnamed>' panicked at /home/ai/.cargo/registry/src/index.crates.io-6f17d22bba15001f/bindgen_cuda-0.1.5/src/lib.rs:262:21:
nvcc error while executing compiling: "nvcc" "--gpu-architecture=sm_75" "-c" "-o" "/mnt/Home/ai/mistral.rs/target/release/build/candle-flash-attn-c9bfae2e45a32870/out/flash_fwd_hdim256_bf16_sm80-21dd0f7dd998e506.o" "--default-stream" "per-thread" "-std=c++17" "-O3" "-U__CUDA_NO_HALF_OPERATORS__" "-U__CUDA_NO_HALF_CONVERSIONS__" "-U__CUDA_NO_HALF2_OPERATORS__" "-U__CUDA_NO_BFLOAT16_CONVERSIONS__" "-Icutlass/include" "--expt-relaxed-constexpr" "--expt-extended-lambda" "--use_fast_math" "--verbose" "kernels/flash_fwd_hdim256_bf16_sm80.cu"

# stdout

# stderr

#$ cudafe++ --c++17 --gnu_version=130201 --display_error_number --orig_src_file_name "kernels/flash_fwd_hdim192_bf16_sm80.cu" --orig_src_path_name "/mnt/Home/ai/.cargo/git/checkouts/candle-c6a149c3b35a488f/997a57c/candle-flash-attn/kernels/flash_fwd_hdim192_bf16_sm80.cu" --allow_managed --extended-lambda --relaxed_constexpr --m64 --parse_templates --gen_c_file_name "/home/ai/tmp/tmpxft_00005bb3_00000000-6_flash_fwd_hdim192_bf16_sm80.cudafe1.cpp" --stub_file_name "tmpxft_00005bb3_00000000-6_flash_fwd_hdim192_bf16_sm80.cudafe1.stub.c" --gen_module_id_file --module_id_file_name "/home/ai/tmp/tmpxft_00005bb3_00000000-4_flash_fwd_hdim192_bf16_sm80.module_id" "/home/ai/tmp/tmpxft_00005bb3_00000000-5_flash_fwd_hdim192_bf16_sm80.cpp4.ii"

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 32UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_1, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::constant<int, 0>, cute::_4>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::constant<int, 4>, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 32>>, cute::_8>, cute::tuple<cute::tuple<cute::_256, cute::_1>, cute::constant<int, 32>>>, cute::tuple<cute::_32, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::half_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::half_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::half_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::half_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::half_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::half_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::half_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::half_t>>, Is_dropout=false, Is_causal=true]" at line 186 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim192<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::half_t]" at line 9 of kernels/flash_fwd_hdim192_fp16_sm80.cu

#$ gcc -std=c++17 -D__CUDA_ARCH__=750 -D__CUDA_ARCH_LIST__=750 -D__NV_LEGACY_LAUNCH -E -x c++ -DCUDA_DOUBLE_MATH_FUNCTIONS -D__CUDACC__ -D__NVCC__ -D__CUDACC_EXTENDED_LAMBDA__ -D__CUDACC_RELAXED_CONSTEXPR__ -O3 -I"cutlass/include" "-I/opt/cuda/bin/../targets/x86_64-linux/include" -U "__CUDA_NO_HALF_OPERATORS__" -U "__CUDA_NO_HALF_CONVERSIONS__" -U "__CUDA_NO_HALF2_OPERATORS__" -U "__CUDA_NO_BFLOAT16_CONVERSIONS__" -D__CUDACC_VER_MAJOR__=12 -D__CUDACC_VER_MINOR__=4 -D__CUDACC_VER_BUILD__=131 -D__CUDA_API_VER_MAJOR__=12 -D__CUDA_API_VER_MINOR__=4 -DCUDA_API_PER_THREAD_DEFAULT_STREAM=1 -D__NVCC_DIAG_PRAGMA_SUPPORT__=1 -include "cuda_runtime.h" -m64 "kernels/flash_fwd_hdim160_bf16_sm80.cu" -o "/home/ai/tmp/tmpxft_00005bae_00000000-7_flash_fwd_hdim160_bf16_sm80.cpp1.ii"
#$ gcc -std=c++17 -D__CUDA_ARCH__=750 -D__CUDA_ARCH_LIST__=750 -D__NV_LEGACY_LAUNCH -E -x c++ -DCUDA_DOUBLE_MATH_FUNCTIONS -D__CUDACC__ -D__NVCC__ -D__CUDACC_EXTENDED_LAMBDA__ -D__CUDACC_RELAXED_CONSTEXPR__ -O3 -I"cutlass/include" "-I/opt/cuda/bin/../targets/x86_64-linux/include" -U "__CUDA_NO_HALF_OPERATORS__" -U "__CUDA_NO_HALF_CONVERSIONS__" -U "__CUDA_NO_HALF2_OPERATORS__" -U "__CUDA_NO_BFLOAT16_CONVERSIONS__" -D__CUDACC_VER_MAJOR__=12 -D__CUDACC_VER_MINOR__=4 -D__CUDACC_VER_BUILD__=131 -D__CUDA_API_VER_MAJOR__=12 -D__CUDA_API_VER_MINOR__=4 -DCUDA_API_PER_THREAD_DEFAULT_STREAM=1 -D__NVCC_DIAG_PRAGMA_SUPPORT__=1 -include "cuda_runtime.h" -m64 "kernels/flash_fwd_hdim192_bf16_sm80.cu" -o "/home/ai/tmp/tmpxft_00005bb3_00000000-7_flash_fwd_hdim192_bf16_sm80.cpp1.ii"
#$ cicc --c++17 --gnu_version=130201 --display_error_number --orig_src_file_name "kernels/flash_fwd_hdim160_bf16_sm80.cu" --orig_src_path_name "/mnt/Home/ai/.cargo/git/checkouts/candle-c6a149c3b35a488f/997a57c/candle-flash-attn/kernels/flash_fwd_hdim160_bf16_sm80.cu" --allow_managed --extended-lambda --relaxed_constexpr -arch compute_75 -m64 --no-version-ident -ftz=1 -prec_div=0 -prec_sqrt=0 -fmad=1 -fast-math --gen_div_approx_ftz --include_file_name "tmpxft_00005bae_00000000-3_flash_fwd_hdim160_bf16_sm80.fatbin.c" -tused --module_id_file_name "/home/ai/tmp/tmpxft_00005bae_00000000-4_flash_fwd_hdim160_bf16_sm80.module_id" --gen_c_file_name "/home/ai/tmp/tmpxft_00005bae_00000000-6_flash_fwd_hdim160_bf16_sm80.cudafe1.c" --stub_file_name "/home/ai/tmp/tmpxft_00005bae_00000000-6_flash_fwd_hdim160_bf16_sm80.cudafe1.stub.c" --gen_device_file_name "/home/ai/tmp/tmpxft_00005bae_00000000-6_flash_fwd_hdim160_bf16_sm80.cudafe1.gpu" "/home/ai/tmp/tmpxft_00005bae_00000000-7_flash_fwd_hdim160_bf16_sm80.cpp1.ii" -o "/home/ai/tmp/tmpxft_00005bae_00000000-6_flash_fwd_hdim160_bf16_sm80.ptx"
#$ cicc --c++17 --gnu_version=130201 --display_error_number --orig_src_file_name "kernels/flash_fwd_hdim192_bf16_sm80.cu" --orig_src_path_name "/mnt/Home/ai/.cargo/git/checkouts/candle-c6a149c3b35a488f/997a57c/candle-flash-attn/kernels/flash_fwd_hdim192_bf16_sm80.cu" --allow_managed --extended-lambda --relaxed_constexpr -arch compute_75 -m64 --no-version-ident -ftz=1 -prec_div=0 -prec_sqrt=0 -fmad=1 -fast-math --gen_div_approx_ftz --include_file_name "tmpxft_00005bb3_00000000-3_flash_fwd_hdim192_bf16_sm80.fatbin.c" -tused --module_id_file_name "/home/ai/tmp/tmpxft_00005bb3_00000000-4_flash_fwd_hdim192_bf16_sm80.module_id" --gen_c_file_name "/home/ai/tmp/tmpxft_00005bb3_00000000-6_flash_fwd_hdim192_bf16_sm80.cudafe1.c" --stub_file_name "/home/ai/tmp/tmpxft_00005bb3_00000000-6_flash_fwd_hdim192_bf16_sm80.cudafe1.stub.c" --gen_device_file_name "/home/ai/tmp/tmpxft_00005bb3_00000000-6_flash_fwd_hdim192_bf16_sm80.cudafe1.gpu" "/home/ai/tmp/tmpxft_00005bb3_00000000-7_flash_fwd_hdim192_bf16_sm80.cpp1.ii" -o "/home/ai/tmp/tmpxft_00005bb3_00000000-6_flash_fwd_hdim192_bf16_sm80.ptx"

2 errors detected in the compilation of "kernels/flash_fwd_hdim192_fp16_sm80.cu".
# --error 0x1 --

thread '<unnamed>' panicked at /home/ai/.cargo/registry/src/index.crates.io-6f17d22bba15001f/bindgen_cuda-0.1.5/src/lib.rs:262:21:
nvcc error while executing compiling: "nvcc" "--gpu-architecture=sm_75" "-c" "-o" "/mnt/Home/ai/mistral.rs/target/release/build/candle-flash-attn-c9bfae2e45a32870/out/flash_fwd_hdim192_fp16_sm80-3981fe996a7e8814.o" "--default-stream" "per-thread" "-std=c++17" "-O3" "-U__CUDA_NO_HALF_OPERATORS__" "-U__CUDA_NO_HALF_CONVERSIONS__" "-U__CUDA_NO_HALF2_OPERATORS__" "-U__CUDA_NO_BFLOAT16_CONVERSIONS__" "-Icutlass/include" "--expt-relaxed-constexpr" "--expt-extended-lambda" "--use_fast_math" "--verbose" "kernels/flash_fwd_hdim192_fp16_sm80.cu"

# stdout

# stderr

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 32UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_1, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::constant<int, 0>, cute::_4>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::constant<int, 4>, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 16>>, cute::_8>, cute::tuple<cute::tuple<cute::_128, cute::_1>, cute::_16>>, cute::tuple<cute::_16, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<96, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<96, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<96, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<96, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true]" at line 95 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim96<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim96_bf16_sm80.cu

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 64UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_2, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::_4, cute::_8>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::_8, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 16>>, cute::_8>, cute::tuple<cute::tuple<cute::_128, cute::_1>, cute::constant<int, 16>>>, cute::tuple<cute::_16, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true]" at line 71 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim64<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim64_bf16_sm80.cu

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 128UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_2, cute::_16>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::_4, cute::_8>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::_16, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::_16, cute::_8>, cute::_8>, cute::tuple<cute::tuple<cute::_64, cute::_1>, cute::_8>>, cute::tuple<cute::_8, cute::_128>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 128, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 128, 4, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 128, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 128, 4, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 128, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 128, 4, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<64, 128, 128, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<64, 128, 128, 4, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true]" at line 71 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim64<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim64_bf16_sm80.cu

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 64UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_2, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::_4, cute::_8>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::_8, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 16>>, cute::_8>, cute::tuple<cute::tuple<cute::_128, cute::_1>, cute::_16>>, cute::tuple<cute::_16, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<96, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<96, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<96, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<96, 128, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<96, 128, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true]" at line 95 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim96<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim96_bf16_sm80.cu

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 32UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_1, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::constant<int, 0>, cute::_4>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::constant<int, 4>, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 16>>, cute::_8>, cute::tuple<cute::tuple<cute::_128, cute::_1>, cute::_16>>, cute::tuple<cute::_16, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<160, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<160, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<160, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<160, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true]" at line 158 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim160<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim160_bf16_sm80.cu

2 errors detected in the compilation of "kernels/flash_fwd_hdim64_bf16_sm80.cu".
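Note that each translation unit reports the same assertion more than once ("2 errors", "3 errors"): the launch template turns every runtime flag (Is_dropout, Is_causal, Is_local, Has_alibi, ...) into a template parameter and instantiates all branches, so one bad tile shape fails once per expanded variant. A hedged sketch of that dispatch pattern follows; run_variants, launch_causal, and kernel_for are illustrative names, not flash-attn's actual launcher.

    // Illustrative compile-time dispatch: every combination is instantiated even
    // though only one runs, so a single failing static_assert repeats per variant.
    #include <cuda_runtime.h>

    template <bool Is_dropout, bool Is_causal>
    __global__ void kernel_for() {
        // A shape check like flash_fwd_kernel.h(67) would fire here once for
        // every <Is_dropout, Is_causal> combination the launcher expands.
    }

    template <bool Is_dropout>
    void launch_causal(bool is_causal, cudaStream_t stream) {
        if (is_causal) kernel_for<Is_dropout, true><<<1, 32, 0, stream>>>();
        else           kernel_for<Is_dropout, false><<<1, 32, 0, stream>>>();
    }

    void run_variants(bool is_dropout, bool is_causal, cudaStream_t stream) {
        if (is_dropout) launch_causal<true>(is_causal, stream);
        else            launch_causal<false>(is_causal, stream);
    }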
# --error 0x1 --

thread '<unnamed>' panicked at /home/ai/.cargo/registry/src/index.crates.io-6f17d22bba15001f/bindgen_cuda-0.1.5/src/lib.rs:262:21:
nvcc error while executing compiling: "nvcc" "--gpu-architecture=sm_75" "-c" "-o" "/mnt/Home/ai/mistral.rs/target/release/build/candle-flash-attn-c9bfae2e45a32870/out/flash_fwd_hdim64_bf16_sm80-eaa7ce7f57eb7351.o" "--default-stream" "per-thread" "-std=c++17" "-O3" "-U__CUDA_NO_HALF_OPERATORS__" "-U__CUDA_NO_HALF_CONVERSIONS__" "-U__CUDA_NO_HALF2_OPERATORS__" "-U__CUDA_NO_BFLOAT16_CONVERSIONS__" "-Icutlass/include" "--expt-relaxed-constexpr" "--expt-extended-lambda" "--use_fast_math" "--verbose" "kernels/flash_fwd_hdim64_bf16_sm80.cu"

# stdout

# stderr

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 32UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_1, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::constant<int, 0>, cute::_4>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::constant<int, 4>, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 16>>, cute::_8>, cute::tuple<cute::tuple<cute::_128, cute::_1>, cute::_16>>, cute::tuple<cute::_16, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<192, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<192, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<192, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<192, 64, 64, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 64, 64, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true]" at line 186 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim192<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim192_bf16_sm80.cu

2 errors detected in the compilation of "kernels/flash_fwd_hdim96_bf16_sm80.cu".
# --error 0x1 --

thread '<unnamed>' panicked at /home/ai/.cargo/registry/src/index.crates.io-6f17d22bba15001f/bindgen_cuda-0.1.5/src/lib.rs:262:21:
nvcc error while executing compiling: "nvcc" "--gpu-architecture=sm_75" "-c" "-o" "/mnt/Home/ai/mistral.rs/target/release/build/candle-flash-attn-c9bfae2e45a32870/out/flash_fwd_hdim96_bf16_sm80-f51ba409eb93ce41.o" "--default-stream" "per-thread" "-std=c++17" "-O3" "-U__CUDA_NO_HALF_OPERATORS__" "-U__CUDA_NO_HALF_CONVERSIONS__" "-U__CUDA_NO_HALF2_OPERATORS__" "-U__CUDA_NO_BFLOAT16_CONVERSIONS__" "-Icutlass/include" "--expt-relaxed-constexpr" "--expt-extended-lambda" "--use_fast_math" "--verbose" "kernels/flash_fwd_hdim96_bf16_sm80.cu"

# stdout

# stderr

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 32UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_2, cute::constant<int, 4>>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::_4, cute::_8>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::constant<int, 4>, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 4>, cute::constant<int, 32>>, cute::_8>, cute::tuple<cute::tuple<cute::_256, cute::_1>, cute::constant<int, 32>>>, cute::tuple<cute::constant<int, 32>, cute::constant<int, 32>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 32, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 32, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 32, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 32, 4, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=true]" at line 158 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim160<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim160_bf16_sm80.cu

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 32UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_1, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::constant<int, 0>, cute::_4>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::constant<int, 4>, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 32>>, cute::_8>, cute::tuple<cute::tuple<cute::_256, cute::_1>, cute::constant<int, 32>>>, cute::tuple<cute::_32, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true, Is_local=false, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=false]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<192, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<192, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=false, Is_causal=true]" at line 186 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim192<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim192_bf16_sm80.cu

2 errors detected in the compilation of "kernels/flash_fwd_hdim192_bf16_sm80.cu".
# --error 0x1 --

thread '<unnamed>' panicked at /home/ai/.cargo/registry/src/index.crates.io-6f17d22bba15001f/bindgen_cuda-0.1.5/src/lib.rs:262:21:
nvcc error while executing compiling: "nvcc" "--gpu-architecture=sm_75" "-c" "-o" "/mnt/Home/ai/mistral.rs/target/release/build/candle-flash-attn-c9bfae2e45a32870/out/flash_fwd_hdim192_bf16_sm80-f7453c8601d43b17.o" "--default-stream" "per-thread" "-std=c++17" "-O3" "-U__CUDA_NO_HALF_OPERATORS__" "-U__CUDA_NO_HALF_CONVERSIONS__" "-U__CUDA_NO_HALF2_OPERATORS__" "-U__CUDA_NO_BFLOAT16_CONVERSIONS__" "-Icutlass/include" "--expt-relaxed-constexpr" "--expt-extended-lambda" "--use_fast_math" "--verbose" "kernels/flash_fwd_hdim192_bf16_sm80.cu"

# stdout

# stderr

kernels/flash_fwd_kernel.h(67): error: static assertion failed
      static_assert(decltype(size<1>(tPrP) == size<1>(tPgP))::value);
      ^
          detected during:
            instantiation of "void flash::write_softmax_to_gmem(const cute::Tensor<Engine0, Layout0> &, cute::Tensor<Engine1, Layout1> &, TiledCopy) [with Engine0=cute::array_aligned<cutlass::half_t, 32UL, 16UL>, Layout0=cute::Layout<cute::tuple<cute::tuple<cute::_2, cute::_2, cute::_1>, cute::_1, cute::_8>, cute::tuple<cute::tuple<cute::_1, cute::_2, cute::constant<int, 0>>, cute::constant<int, 0>, cute::_4>>, Engine1=cute::ViewEngine<cute::gmem_ptr<cutlass::half_t>>, Layout1=cute::Layout<cute::tuple<cute::tuple<cute::_1, cute::_8>, cute::constant<int, 4>, cute::_1>, cute::tuple<cute::tuple<cute::constant<int, 0>, cute::constant<int, 1>>, int, cute::constant<int, 0>>>, TiledCopy=cute::TiledCopy<cute::Copy_Atom<cute::DefaultCopy, cutlass::half_t>, cute::Layout<cute::tuple<cute::tuple<cute::constant<int, 8>, cute::constant<int, 32>>, cute::_8>, cute::tuple<cute::tuple<cute::_256, cute::_1>, cute::constant<int, 32>>>, cute::tuple<cute::constant<int, 32>, cute::constant<int, 64>>>]" at line 436
            instantiation of "void flash::compute_attn_1rowblock<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &, int, int, int) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=false, Is_local=true, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 630
            instantiation of "void flash::compute_attn<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax,Params>(const Params &) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=false, Is_local=true, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true, Params=Flash_fwd_params]" at line 14 of kernels/flash_fwd_launch_template.h
            instantiation of "void flash_fwd_kernel<Kernel_traits,Is_dropout,Is_causal,Is_local,Has_alibi,Is_even_MN,Is_even_K,Return_softmax>(Flash_fwd_params) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=false, Is_local=true, Has_alibi=true, Is_even_MN=false, Is_even_K=true, Return_softmax=true]" at line 31 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_flash_fwd<Kernel_traits,Is_dropout,Is_causal>(Flash_fwd_params &, cudaStream_t) [with Kernel_traits=Flash_fwd_kernel_traits<160, 128, 64, 8, false, false, cutlass::bfloat16_t, Flash_kernel_traits<160, 128, 64, 8, cutlass::bfloat16_t>>, Is_dropout=true, Is_causal=false]" at line 158 of kernels/flash_fwd_launch_template.h
            instantiation of "void run_mha_fwd_hdim160<T>(Flash_fwd_params &, cudaStream_t) [with T=cutlass::bfloat16_t]" at line 9 of kernels/flash_fwd_hdim160_bf16_sm80.cu

3 errors detected in the compilation of "kernels/flash_fwd_hdim160_bf16_sm80.cu".
# --error 0x1 --

thread '<unnamed>' panicked at /home/ai/.cargo/registry/src/index.crates.io-6f17d22bba15001f/bindgen_cuda-0.1.5/src/lib.rs:262:21:
nvcc error while executing compiling: "nvcc" "--gpu-architecture=sm_75" "-c" "-o" "/mnt/Home/ai/mistral.rs/target/release/build/candle-flash-attn-c9bfae2e45a32870/out/flash_fwd_hdim160_bf16_sm80-b8e226bc00ecbaf1.o" "--default-stream" "per-thread" "-std=c++17" "-O3" "-U__CUDA_NO_HALF_OPERATORS__" "-U__CUDA_NO_HALF_CONVERSIONS__" "-U__CUDA_NO_HALF2_OPERATORS__" "-U__CUDA_NO_BFLOAT16_CONVERSIONS__" "-Icutlass/include" "--expt-relaxed-constexpr" "--expt-extended-lambda" "--use_fast_math" "--verbose" "kernels/flash_fwd_hdim160_bf16_sm80.cu"

# stdout

# stderr