[clang] 9df395b - [Clang][VE] Add vector mask intrinsics to clang

Kazushi Marukawa via cfe-commits cfe-commits at lists.llvm.org
Thu Mar 17 02:52:34 PDT 2022


Author: Kazushi (Jam) Marukawa
Date: 2022-03-17T18:52:28+09:00
New Revision: 9df395bb68a6798fa859e9ff61d38e2267df4333

URL: https://github.com/llvm/llvm-project/commit/9df395bb68a6798fa859e9ff61d38e2267df4333
DIFF: https://github.com/llvm/llvm-project/commit/9df395bb68a6798fa859e9ff61d38e2267df4333.diff

LOG: [Clang][VE] Add vector mask intrinsics to clang

Add vector mask intrinsic instructions to clang.

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D121816

Added: 
    

Modified: 
    clang/include/clang/Basic/BuiltinsVEVL.gen.def
    clang/lib/Headers/velintrin.h
    clang/test/CodeGen/VE/ve-velintrin.c

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/Basic/BuiltinsVEVL.gen.def b/clang/include/clang/Basic/BuiltinsVEVL.gen.def
index 9960c89b53001..7b06e5c30e93b 100644
--- a/clang/include/clang/Basic/BuiltinsVEVL.gen.def
+++ b/clang/include/clang/Basic/BuiltinsVEVL.gen.def
@@ -31,123 +31,195 @@ BUILTIN(__builtin_ve_vl_vldl2dzx_vssvl, "V256dLUivC*V256dUi", "n")
 BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssl, "V256dLUivC*Ui", "n")
 BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssvl, "V256dLUivC*V256dUi", "n")
 BUILTIN(__builtin_ve_vl_vst_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstnc_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstncot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstu_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstunc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstunc_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstuot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstuot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstuncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstuncot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstl_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstlnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlnc_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstlot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstlncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlncot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vst2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2d_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vst2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dnc_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vst2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vst2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dncot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstu2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2d_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstu2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dnc_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstu2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstu2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dncot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstl2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2d_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstl2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dnc_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstl2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_vstl2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dncot_vssml, "vV256dLUiv*V256bUi", "n")
 BUILTIN(__builtin_ve_vl_pfchv_ssl, "vLivC*Ui", "n")
 BUILTIN(__builtin_ve_vl_pfchvnc_ssl, "vLivC*Ui", "n")
 BUILTIN(__builtin_ve_vl_lsv_vvss, "V256dV256dUiLUi", "n")
 BUILTIN(__builtin_ve_vl_lvsl_svs, "LUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_lvsd_svs, "dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_lvss_svs, "fV256dUi", "n")
+BUILTIN(__builtin_ve_vl_lvm_mmss, "V256bV256bLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_lvm_MMss, "V512bV512bLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_svm_sms, "LUiV256bLUi", "n")
+BUILTIN(__builtin_ve_vl_svm_sMs, "LUiV512bLUi", "n")
 BUILTIN(__builtin_ve_vl_vbrdd_vsl, "V256ddUi", "n")
 BUILTIN(__builtin_ve_vl_vbrdd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdd_vsmvl, "V256ddV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vbrdl_vsl, "V256dLiUi", "n")
 BUILTIN(__builtin_ve_vl_vbrdl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdl_vsmvl, "V256dLiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vbrds_vsl, "V256dfUi", "n")
 BUILTIN(__builtin_ve_vl_vbrds_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrds_vsmvl, "V256dfV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vbrdw_vsl, "V256diUi", "n")
 BUILTIN(__builtin_ve_vl_vbrdw_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdw_vsmvl, "V256diV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrd_vsl, "V256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrd_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrd_vsMvl, "V256dLUiV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmv_vsvl, "V256dUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmv_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmv_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddul_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddul_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddul_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vadduw_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vadduw_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vadduw_vsvl, "V256dUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vadduw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvaddu_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvaddu_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvaddu_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvaddu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswsx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswzx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvadds_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvadds_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvadds_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvadds_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddsl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddsl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddsl_vsvl, "V256dLiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vaddsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubul_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubul_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubul_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubuw_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubuw_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubuw_vsvl, "V256dUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubu_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubu_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubu_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswsx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswzx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubs_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubs_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubs_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsubs_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubsl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubsl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubsl_vsvl, "V256dLiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsubsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulul_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulul_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulul_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmuluw_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmuluw_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmuluw_vsvl, "V256dUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmuluw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswsx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswzx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulsl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulsl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulsl_vsvl, "V256dLiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulslw_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulslw_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmulslw_vsvl, "V256diV256dUi", "n")
@@ -156,148 +228,221 @@ BUILTIN(__builtin_ve_vl_vdivul_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivul_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivul_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivuw_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivuw_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivuw_vsvl, "V256dUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivul_vvsl, "V256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vdivul_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvsmvl, "V256dV256dLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivuw_vvsl, "V256dV256dUiUi", "n")
 BUILTIN(__builtin_ve_vl_vdivuw_vvsvl, "V256dV256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvsmvl, "V256dV256dUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswsx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswzx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswsx_vvsl, "V256dV256diUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvsmvl, "V256dV256diV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswzx_vvsl, "V256dV256diUi", "n")
 BUILTIN(__builtin_ve_vl_vdivswzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvsmvl, "V256dV256diV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivsl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivsl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivsl_vsvl, "V256dLiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vdivsl_vvsl, "V256dV256dLiUi", "n")
 BUILTIN(__builtin_ve_vl_vdivsl_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvsmvl, "V256dV256dLiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpul_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpul_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpul_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpuw_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpuw_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpuw_vsvl, "V256dUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmpu_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmpu_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmpu_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmpu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswsx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswzx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmps_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmps_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmps_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcmps_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpsl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpsl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpsl_vsvl, "V256dLiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcmpsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswsx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswzx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmaxs_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmaxs_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmaxs_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmaxs_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswsx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswzx_vsvl, "V256diV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmins_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmins_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmins_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvmins_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxsl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxsl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxsl_vsvl, "V256dLiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vmaxsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminsl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminsl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminsl_vsvl, "V256dLiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vminsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vand_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vand_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vand_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vand_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvand_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvand_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvand_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvand_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vor_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vor_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vor_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvor_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvor_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvor_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vxor_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vxor_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vxor_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vxor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvxor_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvxor_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvxor_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvxor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_veqv_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_veqv_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_veqv_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_veqv_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pveqv_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pveqv_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pveqv_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pveqv_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vldz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vldz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldz_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvldzlo_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvldzlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzlo_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvldzup_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvldzup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzup_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvldz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvldz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldz_vvMvl, "V256dV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vpcnt_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vpcnt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vpcnt_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvpcntlo_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvpcntlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntlo_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvpcntup_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvpcntup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntup_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvpcnt_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvpcnt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcnt_vvMvl, "V256dV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vbrv_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vbrv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrv_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrvlo_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrvlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvlo_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrvup_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrvup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvup_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrv_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvbrv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrv_vvMvl, "V256dV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vseq_vl, "V256dUi", "n")
 BUILTIN(__builtin_ve_vl_vseq_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvseqlo_vl, "V256dUi", "n")
@@ -310,96 +455,143 @@ BUILTIN(__builtin_ve_vl_vsll_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsll_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsll_vvsl, "V256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vsll_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvsmvl, "V256dV256dLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsll_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsll_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsll_vvsl, "V256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_pvsll_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrl_vvsl, "V256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vsrl_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvsmvl, "V256dV256dLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsrl_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsrl_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsrl_vvsl, "V256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_pvsrl_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslawsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslawsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslawsx_vvsl, "V256dV256diUi", "n")
 BUILTIN(__builtin_ve_vl_vslawsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvsmvl, "V256dV256diV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslawzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslawzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslawzx_vvsl, "V256dV256diUi", "n")
 BUILTIN(__builtin_ve_vl_vslawzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvsmvl, "V256dV256diV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsla_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsla_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsla_vvsl, "V256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_pvsla_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslal_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslal_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vslal_vvsl, "V256dV256dLiUi", "n")
 BUILTIN(__builtin_ve_vl_vslal_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvsmvl, "V256dV256dLiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawsx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawsx_vvsl, "V256dV256diUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvsmvl, "V256dV256diV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawzx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawzx_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawzx_vvsl, "V256dV256diUi", "n")
 BUILTIN(__builtin_ve_vl_vsrawzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvsmvl, "V256dV256diV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsra_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsra_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvsra_vvsl, "V256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_pvsra_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsral_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsral_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsral_vvsl, "V256dV256dLiUi", "n")
 BUILTIN(__builtin_ve_vl_vsral_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvsmvl, "V256dV256dLiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsfa_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vsfa_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsfa_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfaddd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfaddd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfaddd_vsvl, "V256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfaddd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfadds_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfadds_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfadds_vsvl, "V256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfadds_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vsvmvl, "V256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfadd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfadd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfadd_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfadd_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubd_vsvl, "V256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubs_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubs_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubs_vsvl, "V256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsubs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vsvmvl, "V256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfsub_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfsub_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfsub_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfsub_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuld_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuld_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuld_vsvl, "V256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuld_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vsvmvl, "V256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuls_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuls_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuls_vsvl, "V256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmuls_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vsvmvl, "V256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmul_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmul_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmul_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivd_vsvl, "V256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivs_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivs_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivs_vsvl, "V256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfdivs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vsvmvl, "V256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsqrtd_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsqrtd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfsqrts_vvl, "V256dV256dUi", "n")
@@ -408,110 +600,164 @@ BUILTIN(__builtin_ve_vl_vfcmpd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfcmpd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfcmpd_vsvl, "V256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfcmpd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfcmps_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfcmps_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfcmps_vsvl, "V256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfcmps_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vsvmvl, "V256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfcmp_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfcmp_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfcmp_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfcmp_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxd_vsvl, "V256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxs_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxs_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxs_vsvl, "V256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmaxs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vsvmvl, "V256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmax_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmax_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmax_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmax_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmind_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmind_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmind_vsvl, "V256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmind_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vsvmvl, "V256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmins_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmins_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmins_vsvl, "V256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmins_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vsvmvl, "V256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmin_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmin_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmin_vsvl, "V256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmin_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmadd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmadd_vsvvl, "V256ddV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmadd_vvsvl, "V256dV256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmads_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmads_vsvvl, "V256dfV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmads_vsvvvl, "V256dfV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmads_vvsvl, "V256dV256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmads_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmad_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmad_vsvvl, "V256dLUiV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmad_vvsvl, "V256dV256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbd_vsvvl, "V256ddV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbd_vvsvl, "V256dV256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbs_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbs_vsvvl, "V256dfV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbs_vvsvl, "V256dV256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmsb_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmsb_vsvvl, "V256dLUiV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmsb_vvsvl, "V256dV256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmadd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmadd_vsvvl, "V256ddV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmadd_vvsvl, "V256dV256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmads_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmads_vsvvl, "V256dfV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmads_vsvvvl, "V256dfV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmads_vvsvl, "V256dV256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmads_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmad_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmad_vsvvl, "V256dLUiV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmad_vvsvl, "V256dV256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvl, "V256ddV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvl, "V256dV256ddV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvl, "V256dfV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvl, "V256dV256dfV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvl, "V256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvl, "V256dLUiV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvl, "V256dV256dLUiV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vrcpd_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vrcpd_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vrcps_vvl, "V256dV256dUi", "n")
@@ -532,28 +778,40 @@ BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdsx_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsx_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdzx_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzx_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwssx_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwssx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssx_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwszx_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwszx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszx_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcvtws_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcvtws_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtws_vvMvl, "V256dV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvMvl, "V256dV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtld_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtld_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtld_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtldrz_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtldrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtldrz_vvmvl, "V256dV256dV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtdw_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtdw_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtsw_vvl, "V256dV256dUi", "n")
@@ -566,13 +824,312 @@ BUILTIN(__builtin_ve_vl_vcvtds_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtds_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtsd_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vcvtsd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vvvml, "V256dV256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vsvml, "V256dLUiV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vvvMl, "V256dV256dV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vsvMl, "V256dUiV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vsvMvl, "V256dUiV256dV512bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vshf_vvvsl, "V256dV256dV256dLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vshf_vvvsvl, "V256dV256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcp_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vex_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklat_ml, "V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklaf_ml, "V256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkat_Ml, "V512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkaf_Ml, "V512bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlone_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlone_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlole_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuple_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlole_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuple_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwne_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwne_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweq_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweq_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwge_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwge_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwle_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwle_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnum_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnum_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgtnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgtnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwltnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwltnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweqnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweqnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslone_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslone_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslole_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuple_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslole_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuple_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksne_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksne_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseq_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseq_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksge_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksge_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksle_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksle_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnum_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnum_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgtnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgtnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksltnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksltnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseqnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseqnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslenan_MvMl, "V512bV256dV512bUi", "n")
 BUILTIN(__builtin_ve_vl_vsumwsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwsx_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vsumwzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwzx_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vsuml_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsuml_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vfsumd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsumd_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vfsums_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsums_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvl, "V256dV256dUi", "n")
@@ -614,36 +1171,82 @@ BUILTIN(__builtin_ve_vl_vfrminsfst_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfrminslst_vvl, "V256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vfrminslst_vvvl, "V256dV256dV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vrand_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrand_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vror_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vror_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vrxor_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrxor_vvml, "V256dV256dV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vgt_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgt_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vgtnc_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgtnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vgtu_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgtu_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vgtunc_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgtunc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlsx_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlsx_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlzx_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlzx_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssl, "V256dV256dLUiLUiUi", "n")
 BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
 BUILTIN(__builtin_ve_vl_vsc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscnc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscnc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscu_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscu_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscunc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscunc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscuot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscuot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscuncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscuncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vscl_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscl_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vsclnc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclnc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vsclot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_vsclncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_andm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_andm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_orm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_orm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_xorm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_xorm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_eqvm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_eqvm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_nndm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_nndm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_negm_mm, "V256bV256b", "n")
+BUILTIN(__builtin_ve_vl_negm_MM, "V512bV512b", "n")
+BUILTIN(__builtin_ve_vl_pcvm_sml, "LUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_lzvm_sml, "LUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_tovm_sml, "LUiV256bUi", "n")
 BUILTIN(__builtin_ve_vl_lcr_sss, "LUiLUiLUi", "n")
 BUILTIN(__builtin_ve_vl_scr_sss, "vLUiLUiLUi", "n")
 BUILTIN(__builtin_ve_vl_tscr_ssss, "LUiLUiLUiLUi", "n")

diff --git a/clang/lib/Headers/velintrin.h b/clang/lib/Headers/velintrin.h
index c12054a9e965b..69b1fba296d4d 100644
--- a/clang/lib/Headers/velintrin.h
+++ b/clang/lib/Headers/velintrin.h
@@ -12,9 +12,7 @@
 // Vector registers
 typedef double __vr __attribute__((__vector_size__(2048)));
 
-// TODO: Vector mask registers
-// Depend on https://reviews.llvm.org/D88905
-#if 0
+// Vector mask registers
 #if __STDC_VERSION__ >= 199901L
 // For C99
 typedef _Bool __vm    __attribute__((ext_vector_type(256)));
@@ -30,7 +28,6 @@ typedef bool __vm512 __attribute__((ext_vector_type(512)));
 #error need C++ or C99 to use vector intrinsics for VE
 #endif
 #endif
-#endif
 
 enum VShuffleCodes {
   VE_VSHUFFLE_YUYU = 0,

diff --git a/clang/test/CodeGen/VE/ve-velintrin.c b/clang/test/CodeGen/VE/ve-velintrin.c
index 44c3309086e8b..bea291746226d 100644
--- a/clang/test/CodeGen/VE/ve-velintrin.c
+++ b/clang/test/CodeGen/VE/ve-velintrin.c
@@ -9,6 +9,8 @@ long v1, v2, v3;
 double vd1;
 float vf1;
 __vr vr1, vr2, vr3, vr4;
+__vm256 vm1, vm2, vm3;
+__vm512 vm1_512, vm2_512, vm3_512;
 
 void __attribute__((noinline))
 test_vld_vssl(char* p, long idx) {
@@ -241,6 +243,13 @@ test_vst_vssl(char* p, long idx) {
   _vel_vst_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vst_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vst_vssml
+  // CHECK: call void @llvm.ve.vl.vst.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vst_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstnc_vssl
@@ -248,6 +257,13 @@ test_vstnc_vssl(char* p, long idx) {
   _vel_vstnc_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstnc_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstnc_vssml
+  // CHECK: call void @llvm.ve.vl.vstnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstnc_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstot_vssl
@@ -255,6 +271,13 @@ test_vstot_vssl(char* p, long idx) {
   _vel_vstot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstot_vssml
+  // CHECK: call void @llvm.ve.vl.vstot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstncot_vssl
@@ -262,6 +285,13 @@ test_vstncot_vssl(char* p, long idx) {
   _vel_vstncot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstncot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstncot_vssml
+  // CHECK: call void @llvm.ve.vl.vstncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstncot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstu_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu_vssl
@@ -269,6 +299,13 @@ test_vstu_vssl(char* p, long idx) {
   _vel_vstu_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstu_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu_vssml
+  // CHECK: call void @llvm.ve.vl.vstu.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstu_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstunc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstunc_vssl
@@ -276,6 +313,13 @@ test_vstunc_vssl(char* p, long idx) {
   _vel_vstunc_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstunc_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstunc_vssml
+  // CHECK: call void @llvm.ve.vl.vstunc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstunc_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstuot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstuot_vssl
@@ -283,6 +327,13 @@ test_vstuot_vssl(char* p, long idx) {
   _vel_vstuot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstuot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstuot_vssml
+  // CHECK: call void @llvm.ve.vl.vstuot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstuot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstuncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstuncot_vssl
@@ -290,6 +341,13 @@ test_vstuncot_vssl(char* p, long idx) {
   _vel_vstuncot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstuncot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstuncot_vssml
+  // CHECK: call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstuncot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstl_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl_vssl
@@ -297,6 +355,13 @@ test_vstl_vssl(char* p, long idx) {
   _vel_vstl_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstl_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl_vssml
+  // CHECK: call void @llvm.ve.vl.vstl.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstl_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstlnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstlnc_vssl
@@ -304,6 +369,13 @@ test_vstlnc_vssl(char* p, long idx) {
   _vel_vstlnc_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstlnc_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstlnc_vssml
+  // CHECK: call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstlnc_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstlot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstlot_vssl
@@ -311,6 +383,13 @@ test_vstlot_vssl(char* p, long idx) {
   _vel_vstlot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstlot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstlot_vssml
+  // CHECK: call void @llvm.ve.vl.vstlot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstlot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstlncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstlncot_vssl
@@ -318,6 +397,13 @@ test_vstlncot_vssl(char* p, long idx) {
   _vel_vstlncot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstlncot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstlncot_vssml
+  // CHECK: call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstlncot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vst2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2d_vssl
@@ -325,6 +411,13 @@ test_vst2d_vssl(char* p, long idx) {
   _vel_vst2d_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vst2d_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2d_vssml
+  // CHECK: call void @llvm.ve.vl.vst2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vst2d_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vst2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dnc_vssl
@@ -332,6 +425,13 @@ test_vst2dnc_vssl(char* p, long idx) {
   _vel_vst2dnc_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vst2dnc_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2dnc_vssml
+  // CHECK: call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vst2dnc_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vst2dot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dot_vssl
@@ -339,6 +439,13 @@ test_vst2dot_vssl(char* p, long idx) {
   _vel_vst2dot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vst2dot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2dot_vssml
+  // CHECK: call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vst2dot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vst2dncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vst2dncot_vssl
@@ -346,6 +453,13 @@ test_vst2dncot_vssl(char* p, long idx) {
   _vel_vst2dncot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vst2dncot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2dncot_vssml
+  // CHECK: call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vst2dncot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstu2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2d_vssl
@@ -353,6 +467,13 @@ test_vstu2d_vssl(char* p, long idx) {
   _vel_vstu2d_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstu2d_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2d_vssml
+  // CHECK: call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstu2d_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstu2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dnc_vssl
@@ -360,6 +481,13 @@ test_vstu2dnc_vssl(char* p, long idx) {
   _vel_vstu2dnc_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstu2dnc_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2dnc_vssml
+  // CHECK: call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstu2dnc_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstu2dot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dot_vssl
@@ -367,6 +495,13 @@ test_vstu2dot_vssl(char* p, long idx) {
   _vel_vstu2dot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstu2dot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2dot_vssml
+  // CHECK: call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstu2dot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstu2dncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstu2dncot_vssl
@@ -374,6 +509,13 @@ test_vstu2dncot_vssl(char* p, long idx) {
   _vel_vstu2dncot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstu2dncot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2dncot_vssml
+  // CHECK: call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstu2dncot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstl2d_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2d_vssl
@@ -381,6 +523,13 @@ test_vstl2d_vssl(char* p, long idx) {
   _vel_vstl2d_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstl2d_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2d_vssml
+  // CHECK: call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstl2d_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstl2dnc_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dnc_vssl
@@ -388,6 +537,13 @@ test_vstl2dnc_vssl(char* p, long idx) {
   _vel_vstl2dnc_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstl2dnc_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2dnc_vssml
+  // CHECK: call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstl2dnc_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstl2dot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dot_vssl
@@ -395,6 +551,13 @@ test_vstl2dot_vssl(char* p, long idx) {
   _vel_vstl2dot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstl2dot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2dot_vssml
+  // CHECK: call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstl2dot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vstl2dncot_vssl(char* p, long idx) {
   // CHECK-LABEL: @test_vstl2dncot_vssl
@@ -402,6 +565,13 @@ test_vstl2dncot_vssl(char* p, long idx) {
   _vel_vstl2dncot_vssl(vr1, idx, p, 256);
 }
 
+void __attribute__((noinline))
+test_vstl2dncot_vssml(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2dncot_vssml
+  // CHECK: call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vstl2dncot_vssml(vr1, idx, p, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_pfchv_ssl(char* p, long idx) {
   // CHECK-LABEL: @test_pfchv_ssl
@@ -444,6 +614,34 @@ test_lvss_svs(int idx) {
   vf1 = _vel_lvss_svs(vr1, idx);
 }
 
+void __attribute__((noinline))
+test_lvm_mmss(unsigned long sy, unsigned long sz) {
+  // CHECK-LABEL: @test_lvm_mmss
+  // CHECK: call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+  vm1 = _vel_lvm_mmss(vm2, sy, sz);
+}
+
+void __attribute__((noinline))
+test_lvm_MMss(unsigned long sy, unsigned long sz) {
+  // CHECK-LABEL: @test_lvm_MMss
+  // CHECK: call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+  vm1_512 = _vel_lvm_MMss(vm2_512, sy, sz);
+}
+
+void __attribute__((noinline))
+test_svm_sms(unsigned long sy) {
+  // CHECK-LABEL: @test_svm_sms
+  // CHECK: call i64 @llvm.ve.vl.svm.sms(<256 x i1> %{{.*}}, i64 %{{.*}})
+  v1 = _vel_svm_sms(vm2, sy);
+}
+
+void __attribute__((noinline))
+test_svm_sMs(unsigned long sy) {
+  // CHECK-LABEL: @test_svm_sMs
+  // CHECK: call i64 @llvm.ve.vl.svm.sMs(<512 x i1> %{{.*}}, i64 %{{.*}})
+  v1 = _vel_svm_sMs(vm2_512, sy);
+}
+
 void __attribute__((noinline))
 test_vbrdd_vsl() {
   // CHECK-LABEL: @test_vbrdd_vsl
@@ -458,6 +656,13 @@ test_vbrdd_vsvl() {
   vr1 = _vel_vbrdd_vsvl(vd1, vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vbrdd_vsmvl() {
+  // CHECK-LABEL: @test_vbrdd_vsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdd.vsmvl(double %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrdd_vsmvl(vd1, vm1, vr1, 256);
+}
+
 void __attribute__((noinline))
 test_vbrdl_vsl() {
   // CHECK-LABEL: @test_vbrdl_vsl
@@ -472,6 +677,13 @@ test_vbrdl_vsvl() {
   vr1 = _vel_vbrdl_vsvl(v1, vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vbrdl_vsmvl() {
+  // CHECK-LABEL: @test_vbrdl_vsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrdl_vsmvl(v1, vm1, vr1, 256);
+}
+
 void __attribute__((noinline))
 test_vbrds_vsl() {
   // CHECK-LABEL: @test_vbrds_vsl
@@ -486,6 +698,13 @@ test_vbrds_vsvl() {
   vr1 = _vel_vbrds_vsvl(vf1, vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vbrds_vsmvl() {
+  // CHECK-LABEL: @test_vbrds_vsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrds.vsmvl(float %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrds_vsmvl(vf1, vm1, vr1, 256);
+}
+
 void __attribute__((noinline))
 test_vbrdw_vsl() {
   // CHECK-LABEL: @test_vbrdw_vsl
@@ -500,6 +719,13 @@ test_vbrdw_vsvl() {
   vr1 = _vel_vbrdw_vsvl(v1, vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vbrdw_vsmvl() {
+  // CHECK-LABEL: @test_vbrdw_vsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrdw_vsmvl(v1, vm1, vr1, 256);
+}
+
 void __attribute__((noinline))
 test_pvbrd_vsl() {
   // CHECK-LABEL: @test_pvbrd_vsl
@@ -514,6 +740,13 @@ test_pvbrd_vsvl() {
   vr1 = _vel_pvbrd_vsvl(v1, vr1, 256);
 }
 
+void __attribute__((noinline))
+test_pvbrd_vsMvl() {
+  // CHECK-LABEL: @test_pvbrd_vsMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrd.vsMvl(i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_pvbrd_vsMvl(v1, vm1_512, vr1, 256);
+}
+
 void __attribute__((noinline))
 test_vmv_vsvl() {
   // CHECK-LABEL: @test_vmv_vsvl
@@ -528,6 +761,13 @@ test_vmv_vsvvl() {
   vr1 = _vel_vmv_vsvvl(v1, vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vmv_vsvmvl() {
+  // CHECK-LABEL: @test_vmv_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vmv_vsvmvl(v1, vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vaddul_vvvl() {
   // CHECK-LABEL: @test_vaddul_vvvl
@@ -556,6 +796,20 @@ test_vaddul_vsvvl() {
   vr3 = _vel_vaddul_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vaddul_vvvmvl() {
+  // CHECK-LABEL: @test_vaddul_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddul_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddul_vsvmvl() {
+  // CHECK-LABEL: @test_vaddul_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddul_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vadduw_vvvl() {
   // CHECK-LABEL: @test_vadduw_vvvl
@@ -584,6 +838,20 @@ test_vadduw_vsvvl() {
   vr3 = _vel_vadduw_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vadduw_vvvmvl() {
+  // CHECK-LABEL: @test_vadduw_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vadduw_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vadduw_vsvmvl() {
+  // CHECK-LABEL: @test_vadduw_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vadduw_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvaddu_vvvl() {
   // CHECK-LABEL: @test_pvaddu_vvvl
@@ -612,6 +880,20 @@ test_pvaddu_vsvvl() {
   vr3 = _vel_pvaddu_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvaddu_vvvMvl() {
+  // CHECK-LABEL: @test_pvaddu_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvaddu_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvaddu_vsvMvl() {
+  // CHECK-LABEL: @test_pvaddu_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvaddu_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vaddswsx_vvvl() {
   // CHECK-LABEL: @test_vaddswsx_vvvl
@@ -640,6 +922,20 @@ test_vaddswsx_vsvvl() {
   vr3 = _vel_vaddswsx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vaddswsx_vvvmvl() {
+  // CHECK-LABEL: @test_vaddswsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswsx_vsvmvl() {
+  // CHECK-LABEL: @test_vaddswsx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswsx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vaddswzx_vvvl() {
   // CHECK-LABEL: @test_vaddswzx_vvvl
@@ -668,6 +964,20 @@ test_vaddswzx_vsvvl() {
   vr3 = _vel_vaddswzx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vaddswzx_vvvmvl() {
+  // CHECK-LABEL: @test_vaddswzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswzx_vsvmvl() {
+  // CHECK-LABEL: @test_vaddswzx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswzx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvadds_vvvl() {
   // CHECK-LABEL: @test_pvadds_vvvl
@@ -696,6 +1006,20 @@ test_pvadds_vsvvl() {
   vr3 = _vel_pvadds_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvadds_vvvMvl() {
+  // CHECK-LABEL: @test_pvadds_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvadds_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvadds_vsvMvl() {
+  // CHECK-LABEL: @test_pvadds_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvadds_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vaddsl_vvvl() {
   // CHECK-LABEL: @test_vaddsl_vvvl
@@ -724,6 +1048,20 @@ test_vaddsl_vsvvl() {
   vr3 = _vel_vaddsl_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vaddsl_vvvmvl() {
+  // CHECK-LABEL: @test_vaddsl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddsl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddsl_vsvmvl() {
+  // CHECK-LABEL: @test_vaddsl_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddsl_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsubul_vvvl() {
   // CHECK-LABEL: @test_vsubul_vvvl
@@ -752,6 +1090,20 @@ test_vsubul_vsvvl() {
   vr3 = _vel_vsubul_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsubul_vvvmvl() {
+  // CHECK-LABEL: @test_vsubul_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubul_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubul_vsvmvl() {
+  // CHECK-LABEL: @test_vsubul_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubul_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsubuw_vvvl() {
   // CHECK-LABEL: @test_vsubuw_vvvl
@@ -780,6 +1132,20 @@ test_vsubuw_vsvvl() {
   vr3 = _vel_vsubuw_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsubuw_vvvmvl() {
+  // CHECK-LABEL: @test_vsubuw_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubuw_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubuw_vsvmvl() {
+  // CHECK-LABEL: @test_vsubuw_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubuw_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvsubu_vvvl() {
   // CHECK-LABEL: @test_pvsubu_vvvl
@@ -808,6 +1174,20 @@ test_pvsubu_vsvvl() {
   vr3 = _vel_pvsubu_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvsubu_vvvMvl() {
+  // CHECK-LABEL: @test_pvsubu_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubu_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubu_vsvMvl() {
+  // CHECK-LABEL: @test_pvsubu_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubu_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsubswsx_vvvl() {
   // CHECK-LABEL: @test_vsubswsx_vvvl
@@ -836,6 +1216,20 @@ test_vsubswsx_vsvvl() {
   vr3 = _vel_vsubswsx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsubswsx_vvvmvl() {
+  // CHECK-LABEL: @test_vsubswsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswsx_vsvmvl() {
+  // CHECK-LABEL: @test_vsubswsx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswsx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsubswzx_vvvl() {
   // CHECK-LABEL: @test_vsubswzx_vvvl
@@ -864,6 +1258,20 @@ test_vsubswzx_vsvvl() {
   vr3 = _vel_vsubswzx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsubswzx_vvvmvl() {
+  // CHECK-LABEL: @test_vsubswzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswzx_vsvmvl() {
+  // CHECK-LABEL: @test_vsubswzx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswzx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvsubs_vvvl() {
   // CHECK-LABEL: @test_pvsubs_vvvl
@@ -892,6 +1300,20 @@ test_pvsubs_vsvvl() {
   vr3 = _vel_pvsubs_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvsubs_vvvMvl() {
+  // CHECK-LABEL: @test_pvsubs_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubs_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubs_vsvMvl() {
+  // CHECK-LABEL: @test_pvsubs_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubs_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsubsl_vvvl() {
   // CHECK-LABEL: @test_vsubsl_vvvl
@@ -920,6 +1342,20 @@ test_vsubsl_vsvvl() {
   vr3 = _vel_vsubsl_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsubsl_vvvmvl() {
+  // CHECK-LABEL: @test_vsubsl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubsl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubsl_vsvmvl() {
+  // CHECK-LABEL: @test_vsubsl_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubsl_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmulul_vvvl() {
   // CHECK-LABEL: @test_vmulul_vvvl
@@ -948,6 +1384,20 @@ test_vmulul_vsvvl() {
   vr3 = _vel_vmulul_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmulul_vvvmvl() {
+  // CHECK-LABEL: @test_vmulul_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulul_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulul_vsvmvl() {
+  // CHECK-LABEL: @test_vmulul_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulul_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmuluw_vvvl() {
   // CHECK-LABEL: @test_vmuluw_vvvl
@@ -976,6 +1426,20 @@ test_vmuluw_vsvvl() {
   vr3 = _vel_vmuluw_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmuluw_vvvmvl() {
+  // CHECK-LABEL: @test_vmuluw_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmuluw_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmuluw_vsvmvl() {
+  // CHECK-LABEL: @test_vmuluw_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmuluw_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmulswsx_vvvl() {
   // CHECK-LABEL: @test_vmulswsx_vvvl
@@ -1004,6 +1468,20 @@ test_vmulswsx_vsvvl() {
   vr3 = _vel_vmulswsx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmulswsx_vvvmvl() {
+  // CHECK-LABEL: @test_vmulswsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswsx_vsvmvl() {
+  // CHECK-LABEL: @test_vmulswsx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswsx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmulswzx_vvvl() {
   // CHECK-LABEL: @test_vmulswzx_vvvl
@@ -1032,6 +1510,20 @@ test_vmulswzx_vsvvl() {
   vr3 = _vel_vmulswzx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmulswzx_vvvmvl() {
+  // CHECK-LABEL: @test_vmulswzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswzx_vsvmvl() {
+  // CHECK-LABEL: @test_vmulswzx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswzx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmulsl_vvvl() {
   // CHECK-LABEL: @test_vmulsl_vvvl
@@ -1060,6 +1552,20 @@ test_vmulsl_vsvvl() {
   vr3 = _vel_vmulsl_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmulsl_vvvmvl() {
+  // CHECK-LABEL: @test_vmulsl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulsl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulsl_vsvmvl() {
+  // CHECK-LABEL: @test_vmulsl_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulsl_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmulslw_vvvl() {
   // CHECK-LABEL: @test_vmulslw_vvvl
@@ -1116,6 +1622,20 @@ test_vdivul_vsvvl() {
   vr3 = _vel_vdivul_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivul_vvvmvl() {
+  // CHECK-LABEL: @test_vdivul_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivul_vsvmvl() {
+  // CHECK-LABEL: @test_vdivul_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivuw_vvvl() {
   // CHECK-LABEL: @test_vdivuw_vvvl
@@ -1144,6 +1664,20 @@ test_vdivuw_vsvvl() {
   vr3 = _vel_vdivuw_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivuw_vvvmvl() {
+  // CHECK-LABEL: @test_vdivuw_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivuw_vsvmvl() {
+  // CHECK-LABEL: @test_vdivuw_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivul_vvsl() {
   // CHECK-LABEL: @test_vdivul_vvsl
@@ -1158,6 +1692,13 @@ test_vdivul_vvsvl() {
   vr3 = _vel_vdivul_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivul_vvsmvl() {
+  // CHECK-LABEL: @test_vdivul_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivuw_vvsl() {
   // CHECK-LABEL: @test_vdivuw_vvsl
@@ -1172,6 +1713,13 @@ test_vdivuw_vvsvl() {
   vr3 = _vel_vdivuw_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivuw_vvsmvl() {
+  // CHECK-LABEL: @test_vdivuw_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivswsx_vvvl() {
   // CHECK-LABEL: @test_vdivswsx_vvvl
@@ -1200,6 +1748,20 @@ test_vdivswsx_vsvvl() {
   vr3 = _vel_vdivswsx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivswsx_vvvmvl() {
+  // CHECK-LABEL: @test_vdivswsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswsx_vsvmvl() {
+  // CHECK-LABEL: @test_vdivswsx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivswzx_vvvl() {
   // CHECK-LABEL: @test_vdivswzx_vvvl
@@ -1228,6 +1790,20 @@ test_vdivswzx_vsvvl() {
   vr3 = _vel_vdivswzx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivswzx_vvvmvl() {
+  // CHECK-LABEL: @test_vdivswzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswzx_vsvmvl() {
+  // CHECK-LABEL: @test_vdivswzx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivswsx_vvsl() {
   // CHECK-LABEL: @test_vdivswsx_vvsl
@@ -1242,6 +1818,13 @@ test_vdivswsx_vvsvl() {
   vr3 = _vel_vdivswsx_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivswsx_vvsmvl() {
+  // CHECK-LABEL: @test_vdivswsx_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivswzx_vvsl() {
   // CHECK-LABEL: @test_vdivswzx_vvsl
@@ -1256,6 +1839,13 @@ test_vdivswzx_vvsvl() {
   vr3 = _vel_vdivswzx_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivswzx_vvsmvl() {
+  // CHECK-LABEL: @test_vdivswzx_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vdivsl_vvvl() {
   // CHECK-LABEL: @test_vdivsl_vvvl
@@ -1285,10 +1875,24 @@ test_vdivsl_vsvvl() {
 }
 
 void __attribute__((noinline))
-test_vdivsl_vvsl() {
-  // CHECK-LABEL: @test_vdivsl_vvsl
-  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
-  vr3 = _vel_vdivsl_vvsl(vr1, v2, 256);
+test_vdivsl_vvvmvl() {
+  // CHECK-LABEL: @test_vdivsl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vsvmvl() {
+  // CHECK-LABEL: @test_vdivsl_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vvsl() {
+  // CHECK-LABEL: @test_vdivsl_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vvsl(vr1, v2, 256);
 }
 
 void __attribute__((noinline))
@@ -1298,6 +1902,13 @@ test_vdivsl_vvsvl() {
   vr3 = _vel_vdivsl_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vdivsl_vvsmvl() {
+  // CHECK-LABEL: @test_vdivsl_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vcmpul_vvvl() {
   // CHECK-LABEL: @test_vcmpul_vvvl
@@ -1326,6 +1937,20 @@ test_vcmpul_vsvvl() {
   vr3 = _vel_vcmpul_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vcmpul_vvvmvl() {
+  // CHECK-LABEL: @test_vcmpul_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpul_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpul_vsvmvl() {
+  // CHECK-LABEL: @test_vcmpul_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpul_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vcmpuw_vvvl() {
   // CHECK-LABEL: @test_vcmpuw_vvvl
@@ -1354,6 +1979,20 @@ test_vcmpuw_vsvvl() {
   vr3 = _vel_vcmpuw_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vcmpuw_vvvmvl() {
+  // CHECK-LABEL: @test_vcmpuw_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpuw_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpuw_vsvmvl() {
+  // CHECK-LABEL: @test_vcmpuw_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpuw_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvcmpu_vvvl() {
   // CHECK-LABEL: @test_pvcmpu_vvvl
@@ -1382,6 +2021,20 @@ test_pvcmpu_vsvvl() {
   vr3 = _vel_pvcmpu_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvcmpu_vvvMvl() {
+  // CHECK-LABEL: @test_pvcmpu_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmpu_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmpu_vsvMvl() {
+  // CHECK-LABEL: @test_pvcmpu_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmpu_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vcmpswsx_vvvl() {
   // CHECK-LABEL: @test_vcmpswsx_vvvl
@@ -1410,6 +2063,20 @@ test_vcmpswsx_vsvvl() {
   vr3 = _vel_vcmpswsx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vcmpswsx_vvvmvl() {
+  // CHECK-LABEL: @test_vcmpswsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswsx_vsvmvl() {
+  // CHECK-LABEL: @test_vcmpswsx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswsx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vcmpswzx_vvvl() {
   // CHECK-LABEL: @test_vcmpswzx_vvvl
@@ -1438,6 +2105,20 @@ test_vcmpswzx_vsvvl() {
   vr3 = _vel_vcmpswzx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vcmpswzx_vvvmvl() {
+  // CHECK-LABEL: @test_vcmpswzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswzx_vsvmvl() {
+  // CHECK-LABEL: @test_vcmpswzx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswzx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvcmps_vvvl() {
   // CHECK-LABEL: @test_pvcmps_vvvl
@@ -1466,6 +2147,20 @@ test_pvcmps_vsvvl() {
   vr3 = _vel_pvcmps_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvcmps_vvvMvl() {
+  // CHECK-LABEL: @test_pvcmps_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmps_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmps_vsvMvl() {
+  // CHECK-LABEL: @test_pvcmps_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmps_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vcmpsl_vvvl() {
   // CHECK-LABEL: @test_vcmpsl_vvvl
@@ -1494,6 +2189,20 @@ test_vcmpsl_vsvvl() {
   vr3 = _vel_vcmpsl_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vcmpsl_vvvmvl() {
+  // CHECK-LABEL: @test_vcmpsl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpsl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpsl_vsvmvl() {
+  // CHECK-LABEL: @test_vcmpsl_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpsl_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmaxswsx_vvvl() {
   // CHECK-LABEL: @test_vmaxswsx_vvvl
@@ -1522,6 +2231,20 @@ test_vmaxswsx_vsvvl() {
   vr3 = _vel_vmaxswsx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmaxswsx_vvvmvl() {
+  // CHECK-LABEL: @test_vmaxswsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswsx_vsvmvl() {
+  // CHECK-LABEL: @test_vmaxswsx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswsx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmaxswzx_vvvl() {
   // CHECK-LABEL: @test_vmaxswzx_vvvl
@@ -1550,6 +2273,20 @@ test_vmaxswzx_vsvvl() {
   vr3 = _vel_vmaxswzx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmaxswzx_vvvmvl() {
+  // CHECK-LABEL: @test_vmaxswzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswzx_vsvmvl() {
+  // CHECK-LABEL: @test_vmaxswzx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswzx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvmaxs_vvvl() {
   // CHECK-LABEL: @test_pvmaxs_vvvl
@@ -1578,6 +2315,20 @@ test_pvmaxs_vsvvl() {
   vr3 = _vel_pvmaxs_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvmaxs_vvvMvl() {
+  // CHECK-LABEL: @test_pvmaxs_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmaxs_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvmaxs_vsvMvl() {
+  // CHECK-LABEL: @test_pvmaxs_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmaxs_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vminswsx_vvvl() {
   // CHECK-LABEL: @test_vminswsx_vvvl
@@ -1606,6 +2357,20 @@ test_vminswsx_vsvvl() {
   vr3 = _vel_vminswsx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vminswsx_vvvmvl() {
+  // CHECK-LABEL: @test_vminswsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminswsx_vsvmvl() {
+  // CHECK-LABEL: @test_vminswsx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswsx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vminswzx_vvvl() {
   // CHECK-LABEL: @test_vminswzx_vvvl
@@ -1634,6 +2399,20 @@ test_vminswzx_vsvvl() {
   vr3 = _vel_vminswzx_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vminswzx_vvvmvl() {
+  // CHECK-LABEL: @test_vminswzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminswzx_vsvmvl() {
+  // CHECK-LABEL: @test_vminswzx_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswzx_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvmins_vvvl() {
   // CHECK-LABEL: @test_pvmins_vvvl
@@ -1662,6 +2441,20 @@ test_pvmins_vsvvl() {
   vr3 = _vel_pvmins_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvmins_vvvMvl() {
+  // CHECK-LABEL: @test_pvmins_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmins_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvmins_vsvMvl() {
+  // CHECK-LABEL: @test_pvmins_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmins_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vmaxsl_vvvl() {
   // CHECK-LABEL: @test_vmaxsl_vvvl
@@ -1690,6 +2483,20 @@ test_vmaxsl_vsvvl() {
   vr3 = _vel_vmaxsl_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vmaxsl_vvvmvl() {
+  // CHECK-LABEL: @test_vmaxsl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxsl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxsl_vsvmvl() {
+  // CHECK-LABEL: @test_vmaxsl_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxsl_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vminsl_vvvl() {
   // CHECK-LABEL: @test_vminsl_vvvl
@@ -1718,6 +2525,20 @@ test_vminsl_vsvvl() {
   vr3 = _vel_vminsl_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vminsl_vvvmvl() {
+  // CHECK-LABEL: @test_vminsl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminsl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminsl_vsvmvl() {
+  // CHECK-LABEL: @test_vminsl_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminsl_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vand_vvvl() {
   // CHECK-LABEL: @test_vand_vvvl
@@ -1746,6 +2567,20 @@ test_vand_vsvvl() {
   vr3 = _vel_vand_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vand_vvvmvl() {
+  // CHECK-LABEL: @test_vand_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vand.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vand_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vand_vsvmvl() {
+  // CHECK-LABEL: @test_vand_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vand.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vand_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvand_vvvl() {
   // CHECK-LABEL: @test_pvand_vvvl
@@ -1774,6 +2609,20 @@ test_pvand_vsvvl() {
   vr3 = _vel_pvand_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvand_vvvMvl() {
+  // CHECK-LABEL: @test_pvand_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvand.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvand_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvand_vsvMvl() {
+  // CHECK-LABEL: @test_pvand_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvand.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvand_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vor_vvvl() {
   // CHECK-LABEL: @test_vor_vvvl
@@ -1802,6 +2651,20 @@ test_vor_vsvvl() {
   vr3 = _vel_vor_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vor_vvvmvl() {
+  // CHECK-LABEL: @test_vor_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vor.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vor_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vor_vsvmvl() {
+  // CHECK-LABEL: @test_vor_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vor.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vor_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvor_vvvl() {
   // CHECK-LABEL: @test_pvor_vvvl
@@ -1830,6 +2693,20 @@ test_pvor_vsvvl() {
   vr3 = _vel_pvor_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvor_vvvMvl() {
+  // CHECK-LABEL: @test_pvor_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvor.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvor_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvor_vsvMvl() {
+  // CHECK-LABEL: @test_pvor_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvor.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvor_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vxor_vvvl() {
   // CHECK-LABEL: @test_vxor_vvvl
@@ -1858,6 +2735,20 @@ test_vxor_vsvvl() {
   vr3 = _vel_vxor_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vxor_vvvmvl() {
+  // CHECK-LABEL: @test_vxor_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vxor.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vxor_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vxor_vsvmvl() {
+  // CHECK-LABEL: @test_vxor_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vxor.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vxor_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvxor_vvvl() {
   // CHECK-LABEL: @test_pvxor_vvvl
@@ -1886,6 +2777,20 @@ test_pvxor_vsvvl() {
   vr3 = _vel_pvxor_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvxor_vvvMvl() {
+  // CHECK-LABEL: @test_pvxor_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvxor_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvxor_vsvMvl() {
+  // CHECK-LABEL: @test_pvxor_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvxor_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_veqv_vvvl() {
   // CHECK-LABEL: @test_veqv_vvvl
@@ -1914,6 +2819,20 @@ test_veqv_vsvvl() {
   vr3 = _vel_veqv_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_veqv_vvvmvl() {
+  // CHECK-LABEL: @test_veqv_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.veqv.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_veqv_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_veqv_vsvmvl() {
+  // CHECK-LABEL: @test_veqv_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.veqv.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_veqv_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pveqv_vvvl() {
   // CHECK-LABEL: @test_pveqv_vvvl
@@ -1942,6 +2861,20 @@ test_pveqv_vsvvl() {
   vr3 = _vel_pveqv_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pveqv_vvvMvl() {
+  // CHECK-LABEL: @test_pveqv_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pveqv_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pveqv_vsvMvl() {
+  // CHECK-LABEL: @test_pveqv_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pveqv_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vldz_vvl() {
   // CHECK-LABEL: @test_vldz_vvl
@@ -1956,6 +2889,13 @@ test_vldz_vvvl() {
   vr3 = _vel_vldz_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vldz_vvmvl() {
+  // CHECK-LABEL: @test_vldz_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vldz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vldz_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvldzlo_vvl() {
   // CHECK-LABEL: @test_pvldzlo_vvl
@@ -1970,6 +2910,13 @@ test_pvldzlo_vvvl() {
   vr3 = _vel_pvldzlo_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvldzlo_vvmvl() {
+  // CHECK-LABEL: @test_pvldzlo_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldzlo.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldzlo_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvldzup_vvl() {
   // CHECK-LABEL: @test_pvldzup_vvl
@@ -1984,6 +2931,13 @@ test_pvldzup_vvvl() {
   vr3 = _vel_pvldzup_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvldzup_vvmvl() {
+  // CHECK-LABEL: @test_pvldzup_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldzup.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldzup_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvldz_vvl() {
   // CHECK-LABEL: @test_pvldz_vvl
@@ -1998,6 +2952,13 @@ test_pvldz_vvvl() {
   vr3 = _vel_pvldz_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvldz_vvMvl() {
+  // CHECK-LABEL: @test_pvldz_vvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldz.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldz_vvMvl(vr1, vm1_512, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vpcnt_vvl() {
   // CHECK-LABEL: @test_vpcnt_vvl
@@ -2012,6 +2973,13 @@ test_vpcnt_vvvl() {
   vr3 = _vel_vpcnt_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vpcnt_vvmvl() {
+  // CHECK-LABEL: @test_vpcnt_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vpcnt.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vpcnt_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvpcntlo_vvl() {
   // CHECK-LABEL: @test_pvpcntlo_vvl
@@ -2026,6 +2994,13 @@ test_pvpcntlo_vvvl() {
   vr3 = _vel_pvpcntlo_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvpcntlo_vvmvl() {
+  // CHECK-LABEL: @test_pvpcntlo_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcntlo.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcntlo_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvpcntup_vvl() {
   // CHECK-LABEL: @test_pvpcntup_vvl
@@ -2040,6 +3015,13 @@ test_pvpcntup_vvvl() {
   vr3 = _vel_pvpcntup_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvpcntup_vvmvl() {
+  // CHECK-LABEL: @test_pvpcntup_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcntup.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcntup_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvpcnt_vvl() {
   // CHECK-LABEL: @test_pvpcnt_vvl
@@ -2054,6 +3036,13 @@ test_pvpcnt_vvvl() {
   vr3 = _vel_pvpcnt_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvpcnt_vvMvl() {
+  // CHECK-LABEL: @test_pvpcnt_vvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcnt.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcnt_vvMvl(vr1, vm1_512, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vbrv_vvl() {
   // CHECK-LABEL: @test_vbrv_vvl
@@ -2068,6 +3057,13 @@ test_vbrv_vvvl() {
   vr3 = _vel_vbrv_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vbrv_vvmvl() {
+  // CHECK-LABEL: @test_vbrv_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrv.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vbrv_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvbrvlo_vvl() {
   // CHECK-LABEL: @test_pvbrvlo_vvl
@@ -2082,6 +3078,13 @@ test_pvbrvlo_vvvl() {
   vr3 = _vel_pvbrvlo_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvbrvlo_vvmvl() {
+  // CHECK-LABEL: @test_pvbrvlo_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrvlo.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrvlo_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvbrvup_vvl() {
   // CHECK-LABEL: @test_pvbrvup_vvl
@@ -2096,6 +3099,13 @@ test_pvbrvup_vvvl() {
   vr3 = _vel_pvbrvup_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvbrvup_vvmvl() {
+  // CHECK-LABEL: @test_pvbrvup_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrvup.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrvup_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_pvbrv_vvl() {
   // CHECK-LABEL: @test_pvbrv_vvl
@@ -2110,6 +3120,13 @@ test_pvbrv_vvvl() {
   vr3 = _vel_pvbrv_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_pvbrv_vvMvl() {
+  // CHECK-LABEL: @test_pvbrv_vvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrv.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrv_vvMvl(vr1, vm1_512, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vseq_vl() {
   // CHECK-LABEL: @test_vseq_vl
@@ -2194,6 +3211,20 @@ test_vsll_vvsvl() {
   vr3 = _vel_vsll_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsll_vvvmvl() {
+  // CHECK-LABEL: @test_vsll_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsll_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsll_vvsmvl() {
+  // CHECK-LABEL: @test_vsll_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsll_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvsll_vvvl() {
   // CHECK-LABEL: @test_pvsll_vvvl
@@ -2222,6 +3253,20 @@ test_pvsll_vvsvl() {
   vr3 = _vel_pvsll_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvsll_vvvMvl() {
+  // CHECK-LABEL: @test_pvsll_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsll_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsll_vvsMvl() {
+  // CHECK-LABEL: @test_pvsll_vvsMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsll_vvsMvl(vr1, v2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsrl_vvvl() {
   // CHECK-LABEL: @test_vsrl_vvvl
@@ -2250,6 +3295,20 @@ test_vsrl_vvsvl() {
   vr3 = _vel_vsrl_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsrl_vvvmvl() {
+  // CHECK-LABEL: @test_vsrl_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrl_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrl_vvsmvl() {
+  // CHECK-LABEL: @test_vsrl_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrl_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvsrl_vvvl() {
   // CHECK-LABEL: @test_pvsrl_vvvl
@@ -2278,6 +3337,20 @@ test_pvsrl_vvsvl() {
   vr3 = _vel_pvsrl_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvsrl_vvvMvl() {
+  // CHECK-LABEL: @test_pvsrl_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsrl_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsrl_vvsMvl() {
+  // CHECK-LABEL: @test_pvsrl_vvsMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsrl_vvsMvl(vr1, v2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vslawsx_vvvl() {
   // CHECK-LABEL: @test_vslawsx_vvvl
@@ -2306,6 +3379,20 @@ test_vslawsx_vvsvl() {
   vr3 = _vel_vslawsx_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vslawsx_vvvmvl() {
+  // CHECK-LABEL: @test_vslawsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslawsx_vvsmvl() {
+  // CHECK-LABEL: @test_vslawsx_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawsx_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vslawzx_vvvl() {
   // CHECK-LABEL: @test_vslawzx_vvvl
@@ -2334,6 +3421,20 @@ test_vslawzx_vvsvl() {
   vr3 = _vel_vslawzx_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vslawzx_vvvmvl() {
+  // CHECK-LABEL: @test_vslawzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslawzx_vvsmvl() {
+  // CHECK-LABEL: @test_vslawzx_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawzx_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvsla_vvvl() {
   // CHECK-LABEL: @test_pvsla_vvvl
@@ -2362,6 +3463,20 @@ test_pvsla_vvsvl() {
   vr3 = _vel_pvsla_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvsla_vvvMvl() {
+  // CHECK-LABEL: @test_pvsla_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsla_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsla_vvsMvl() {
+  // CHECK-LABEL: @test_pvsla_vvsMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsla_vvsMvl(vr1, v2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vslal_vvvl() {
   // CHECK-LABEL: @test_vslal_vvvl
@@ -2390,6 +3505,20 @@ test_vslal_vvsvl() {
   vr3 = _vel_vslal_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vslal_vvvmvl() {
+  // CHECK-LABEL: @test_vslal_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslal_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslal_vvsmvl() {
+  // CHECK-LABEL: @test_vslal_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslal_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsrawsx_vvvl() {
   // CHECK-LABEL: @test_vsrawsx_vvvl
@@ -2418,6 +3547,20 @@ test_vsrawsx_vvsvl() {
   vr3 = _vel_vsrawsx_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsrawsx_vvvmvl() {
+  // CHECK-LABEL: @test_vsrawsx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawsx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawsx_vvsmvl() {
+  // CHECK-LABEL: @test_vsrawsx_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawsx_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsrawzx_vvvl() {
   // CHECK-LABEL: @test_vsrawzx_vvvl
@@ -2446,6 +3589,20 @@ test_vsrawzx_vvsvl() {
   vr3 = _vel_vsrawzx_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsrawzx_vvvmvl() {
+  // CHECK-LABEL: @test_vsrawzx_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawzx_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawzx_vvsmvl() {
+  // CHECK-LABEL: @test_vsrawzx_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawzx_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvsra_vvvl() {
   // CHECK-LABEL: @test_pvsra_vvvl
@@ -2474,6 +3631,20 @@ test_pvsra_vvsvl() {
   vr3 = _vel_pvsra_vvsvl(vr1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvsra_vvvMvl() {
+  // CHECK-LABEL: @test_pvsra_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsra_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsra_vvsMvl() {
+  // CHECK-LABEL: @test_pvsra_vvsMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsra_vvsMvl(vr1, v2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsral_vvvl() {
   // CHECK-LABEL: @test_vsral_vvvl
@@ -2503,9 +3674,23 @@ test_vsral_vvsvl() {
 }
 
 void __attribute__((noinline))
-test_vsfa_vvssl() {
-  // CHECK-LABEL: @test_vsfa_vvssl
-  // CHECK: call <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+test_vsral_vvvmvl() {
+  // CHECK-LABEL: @test_vsral_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsral_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsral_vvsmvl() {
+  // CHECK-LABEL: @test_vsral_vvsmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsral_vvsmvl(vr1, v2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsfa_vvssl() {
+  // CHECK-LABEL: @test_vsfa_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
   vr3 = _vel_vsfa_vvssl(vr1, v1, v2, 256);
 }
 
@@ -2516,6 +3701,13 @@ test_vsfa_vvssvl() {
   vr3 = _vel_vsfa_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vsfa_vvssmvl() {
+  // CHECK-LABEL: @test_vsfa_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsfa_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfaddd_vvvl() {
   // CHECK-LABEL: @test_vfaddd_vvvl
@@ -2544,6 +3736,20 @@ test_vfaddd_vsvvl() {
   vr3 = _vel_vfaddd_vsvvl(vd1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfaddd_vvvmvl() {
+  // CHECK-LABEL: @test_vfaddd_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfaddd_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfaddd_vsvmvl() {
+  // CHECK-LABEL: @test_vfaddd_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfaddd_vsvmvl(vd1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfadds_vvvl() {
   // CHECK-LABEL: @test_vfadds_vvvl
@@ -2572,6 +3778,20 @@ test_vfadds_vsvvl() {
   vr3 = _vel_vfadds_vsvvl(vf1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfadds_vvvmvl() {
+  // CHECK-LABEL: @test_vfadds_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfadds_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfadds_vsvmvl() {
+  // CHECK-LABEL: @test_vfadds_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfadds_vsvmvl(vf1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvfadd_vvvl() {
   // CHECK-LABEL: @test_pvfadd_vvvl
@@ -2600,6 +3820,20 @@ test_pvfadd_vsvvl() {
   vr3 = _vel_pvfadd_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvfadd_vvvMvl() {
+  // CHECK-LABEL: @test_pvfadd_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfadd_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfadd_vsvMvl() {
+  // CHECK-LABEL: @test_pvfadd_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfadd_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfsubd_vvvl() {
   // CHECK-LABEL: @test_vfsubd_vvvl
@@ -2628,6 +3862,20 @@ test_vfsubd_vsvvl() {
   vr3 = _vel_vfsubd_vsvvl(vd1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfsubd_vvvmvl() {
+  // CHECK-LABEL: @test_vfsubd_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubd_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubd_vsvmvl() {
+  // CHECK-LABEL: @test_vfsubd_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubd_vsvmvl(vd1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfsubs_vvvl() {
   // CHECK-LABEL: @test_vfsubs_vvvl
@@ -2656,6 +3904,20 @@ test_vfsubs_vsvvl() {
   vr3 = _vel_vfsubs_vsvvl(vf1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfsubs_vvvmvl() {
+  // CHECK-LABEL: @test_vfsubs_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubs_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubs_vsvmvl() {
+  // CHECK-LABEL: @test_vfsubs_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubs_vsvmvl(vf1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvfsub_vvvl() {
   // CHECK-LABEL: @test_pvfsub_vvvl
@@ -2684,6 +3946,20 @@ test_pvfsub_vsvvl() {
   vr3 = _vel_pvfsub_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvfsub_vvvMvl() {
+  // CHECK-LABEL: @test_pvfsub_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfsub_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfsub_vsvMvl() {
+  // CHECK-LABEL: @test_pvfsub_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfsub_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfmuld_vvvl() {
   // CHECK-LABEL: @test_vfmuld_vvvl
@@ -2712,6 +3988,20 @@ test_vfmuld_vsvvl() {
   vr3 = _vel_vfmuld_vsvvl(vd1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfmuld_vvvmvl() {
+  // CHECK-LABEL: @test_vfmuld_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuld_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuld_vsvmvl() {
+  // CHECK-LABEL: @test_vfmuld_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuld_vsvmvl(vd1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfmuls_vvvl() {
   // CHECK-LABEL: @test_vfmuls_vvvl
@@ -2740,6 +4030,20 @@ test_vfmuls_vsvvl() {
   vr3 = _vel_vfmuls_vsvvl(vf1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfmuls_vvvmvl() {
+  // CHECK-LABEL: @test_vfmuls_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuls_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuls_vsvmvl() {
+  // CHECK-LABEL: @test_vfmuls_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuls_vsvmvl(vf1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvfmul_vvvl() {
   // CHECK-LABEL: @test_pvfmul_vvvl
@@ -2768,6 +4072,20 @@ test_pvfmul_vsvvl() {
   vr3 = _vel_pvfmul_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvfmul_vvvMvl() {
+  // CHECK-LABEL: @test_pvfmul_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmul_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmul_vsvMvl() {
+  // CHECK-LABEL: @test_pvfmul_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmul_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfdivd_vvvl() {
   // CHECK-LABEL: @test_vfdivd_vvvl
@@ -2796,6 +4114,20 @@ test_vfdivd_vsvvl() {
   vr3 = _vel_vfdivd_vsvvl(vd1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfdivd_vvvmvl() {
+  // CHECK-LABEL: @test_vfdivd_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivd_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivd_vsvmvl() {
+  // CHECK-LABEL: @test_vfdivd_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivd_vsvmvl(vd1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfdivs_vvvl() {
   // CHECK-LABEL: @test_vfdivs_vvvl
@@ -2824,6 +4156,20 @@ test_vfdivs_vsvvl() {
   vr3 = _vel_vfdivs_vsvvl(vf1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfdivs_vvvmvl() {
+  // CHECK-LABEL: @test_vfdivs_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivs_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivs_vsvmvl() {
+  // CHECK-LABEL: @test_vfdivs_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivs_vsvmvl(vf1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfsqrtd_vvl() {
   // CHECK-LABEL: @test_vfsqrtd_vvl
@@ -2838,6 +4184,7 @@ test_vfsqrtd_vvvl() {
   vr3 = _vel_vfsqrtd_vvvl(vr1, vr2, 256);
 }
 
+
 void __attribute__((noinline))
 test_vfsqrts_vvl() {
   // CHECK-LABEL: @test_vfsqrts_vvl
@@ -2880,6 +4227,20 @@ test_vfcmpd_vsvvl() {
   vr3 = _vel_vfcmpd_vsvvl(vd1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfcmpd_vvvmvl() {
+  // CHECK-LABEL: @test_vfcmpd_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmpd_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmpd_vsvmvl() {
+  // CHECK-LABEL: @test_vfcmpd_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmpd_vsvmvl(vd1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfcmps_vvvl() {
   // CHECK-LABEL: @test_vfcmps_vvvl
@@ -2908,6 +4269,20 @@ test_vfcmps_vsvvl() {
   vr3 = _vel_vfcmps_vsvvl(vf1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfcmps_vvvmvl() {
+  // CHECK-LABEL: @test_vfcmps_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmps_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmps_vsvmvl() {
+  // CHECK-LABEL: @test_vfcmps_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmps_vsvmvl(vf1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvfcmp_vvvl() {
   // CHECK-LABEL: @test_pvfcmp_vvvl
@@ -2936,6 +4311,20 @@ test_pvfcmp_vsvvl() {
   vr3 = _vel_pvfcmp_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvfcmp_vvvMvl() {
+  // CHECK-LABEL: @test_pvfcmp_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfcmp_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfcmp_vsvMvl() {
+  // CHECK-LABEL: @test_pvfcmp_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfcmp_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfmaxd_vvvl() {
   // CHECK-LABEL: @test_vfmaxd_vvvl
@@ -2964,6 +4353,20 @@ test_vfmaxd_vsvvl() {
   vr3 = _vel_vfmaxd_vsvvl(vd1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfmaxd_vvvmvl() {
+  // CHECK-LABEL: @test_vfmaxd_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxd_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxd_vsvmvl() {
+  // CHECK-LABEL: @test_vfmaxd_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxd_vsvmvl(vd1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfmaxs_vvvl() {
   // CHECK-LABEL: @test_vfmaxs_vvvl
@@ -2992,6 +4395,20 @@ test_vfmaxs_vsvvl() {
   vr3 = _vel_vfmaxs_vsvvl(vf1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfmaxs_vvvmvl() {
+  // CHECK-LABEL: @test_vfmaxs_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxs_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxs_vsvmvl() {
+  // CHECK-LABEL: @test_vfmaxs_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxs_vsvmvl(vf1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvfmax_vvvl() {
   // CHECK-LABEL: @test_pvfmax_vvvl
@@ -3020,6 +4437,20 @@ test_pvfmax_vsvvl() {
   vr3 = _vel_pvfmax_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvfmax_vvvMvl() {
+  // CHECK-LABEL: @test_pvfmax_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmax_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmax_vsvMvl() {
+  // CHECK-LABEL: @test_pvfmax_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmax_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfmind_vvvl() {
   // CHECK-LABEL: @test_vfmind_vvvl
@@ -3048,6 +4479,20 @@ test_vfmind_vsvvl() {
   vr3 = _vel_vfmind_vsvvl(vd1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfmind_vvvmvl() {
+  // CHECK-LABEL: @test_vfmind_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmind_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmind_vsvmvl() {
+  // CHECK-LABEL: @test_vfmind_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmind_vsvmvl(vd1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfmins_vvvl() {
   // CHECK-LABEL: @test_vfmins_vvvl
@@ -3076,6 +4521,20 @@ test_vfmins_vsvvl() {
   vr3 = _vel_vfmins_vsvvl(vf1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vfmins_vvvmvl() {
+  // CHECK-LABEL: @test_vfmins_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmins_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmins_vsvmvl() {
+  // CHECK-LABEL: @test_vfmins_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmins_vsvmvl(vf1, vr2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_pvfmin_vvvl() {
   // CHECK-LABEL: @test_pvfmin_vvvl
@@ -3104,6 +4563,20 @@ test_pvfmin_vsvvl() {
   vr3 = _vel_pvfmin_vsvvl(v1, vr2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_pvfmin_vvvMvl() {
+  // CHECK-LABEL: @test_pvfmin_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmin_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmin_vsvMvl() {
+  // CHECK-LABEL: @test_pvfmin_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmin_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vfmadd_vvvvl() {
   // CHECK-LABEL: @test_vfmadd_vvvvl
@@ -3146,6 +4619,27 @@ test_vfmadd_vvsvvl() {
   vr4 = _vel_vfmadd_vvsvvl(vr1, vd1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfmadd_vvvvmvl() {
+  // CHECK-LABEL: @test_vfmadd_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmadd_vsvvmvl() {
+  // CHECK-LABEL: @test_vfmadd_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmadd_vvsvmvl() {
+  // CHECK-LABEL: @test_vfmadd_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vfmads_vvvvl() {
   // CHECK-LABEL: @test_vfmads_vvvvl
@@ -3188,6 +4682,27 @@ test_vfmads_vvsvvl() {
   vr4 = _vel_vfmads_vvsvvl(vr1, vf1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfmads_vvvvmvl() {
+  // CHECK-LABEL: @test_vfmads_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vsvvmvl() {
+  // CHECK-LABEL: @test_vfmads_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vvsvmvl() {
+  // CHECK-LABEL: @test_vfmads_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_pvfmad_vvvvl() {
   // CHECK-LABEL: @test_pvfmad_vvvvl
@@ -3230,6 +4745,27 @@ test_pvfmad_vvsvvl() {
   vr4 = _vel_pvfmad_vvsvvl(vr1, v1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_pvfmad_vvvvMvl() {
+  // CHECK-LABEL: @test_pvfmad_vvvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vsvvMvl() {
+  // CHECK-LABEL: @test_pvfmad_vsvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vvsvMvl() {
+  // CHECK-LABEL: @test_pvfmad_vvsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vfmsbd_vvvvl() {
   // CHECK-LABEL: @test_vfmsbd_vvvvl
@@ -3272,6 +4808,27 @@ test_vfmsbd_vvsvvl() {
   vr4 = _vel_vfmsbd_vvsvvl(vr1, vd1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfmsbd_vvvvmvl() {
+  // CHECK-LABEL: @test_vfmsbd_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vsvvmvl() {
+  // CHECK-LABEL: @test_vfmsbd_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vvsvmvl() {
+  // CHECK-LABEL: @test_vfmsbd_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vfmsbs_vvvvl() {
   // CHECK-LABEL: @test_vfmsbs_vvvvl
@@ -3314,6 +4871,27 @@ test_vfmsbs_vvsvvl() {
   vr4 = _vel_vfmsbs_vvsvvl(vr1, vf1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfmsbs_vvvvmvl() {
+  // CHECK-LABEL: @test_vfmsbs_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vsvvmvl() {
+  // CHECK-LABEL: @test_vfmsbs_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vvsvmvl() {
+  // CHECK-LABEL: @test_vfmsbs_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_pvfmsb_vvvvl() {
   // CHECK-LABEL: @test_pvfmsb_vvvvl
@@ -3356,6 +4934,27 @@ test_pvfmsb_vvsvvl() {
   vr4 = _vel_pvfmsb_vvsvvl(vr1, v1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_pvfmsb_vvvvMvl() {
+  // CHECK-LABEL: @test_pvfmsb_vvvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vsvvMvl() {
+  // CHECK-LABEL: @test_pvfmsb_vsvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vvsvMvl() {
+  // CHECK-LABEL: @test_pvfmsb_vvsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vfnmadd_vvvvl() {
   // CHECK-LABEL: @test_vfnmadd_vvvvl
@@ -3398,6 +4997,27 @@ test_vfnmadd_vvsvvl() {
   vr4 = _vel_vfnmadd_vvsvvl(vr1, vd1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfnmadd_vvvvmvl() {
+  // CHECK-LABEL: @test_vfnmadd_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vsvvmvl() {
+  // CHECK-LABEL: @test_vfnmadd_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vvsvmvl() {
+  // CHECK-LABEL: @test_vfnmadd_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vfnmads_vvvvl() {
   // CHECK-LABEL: @test_vfnmads_vvvvl
@@ -3440,6 +5060,27 @@ test_vfnmads_vvsvvl() {
   vr4 = _vel_vfnmads_vvsvvl(vr1, vf1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfnmads_vvvvmvl() {
+  // CHECK-LABEL: @test_vfnmads_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vsvvmvl() {
+  // CHECK-LABEL: @test_vfnmads_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vvsvmvl() {
+  // CHECK-LABEL: @test_vfnmads_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_pvfnmad_vvvvl() {
   // CHECK-LABEL: @test_pvfnmad_vvvvl
@@ -3482,6 +5123,27 @@ test_pvfnmad_vvsvvl() {
   vr4 = _vel_pvfnmad_vvsvvl(vr1, v1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_pvfnmad_vvvvMvl() {
+  // CHECK-LABEL: @test_pvfnmad_vvvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vsvvMvl() {
+  // CHECK-LABEL: @test_pvfnmad_vsvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vvsvMvl() {
+  // CHECK-LABEL: @test_pvfnmad_vvsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vfnmsbd_vvvvl() {
   // CHECK-LABEL: @test_vfnmsbd_vvvvl
@@ -3524,6 +5186,27 @@ test_vfnmsbd_vvsvvl() {
   vr4 = _vel_vfnmsbd_vvsvvl(vr1, vd1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfnmsbd_vvvvmvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vsvvmvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vvsvmvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vfnmsbs_vvvvl() {
   // CHECK-LABEL: @test_vfnmsbs_vvvvl
@@ -3566,6 +5249,27 @@ test_vfnmsbs_vvsvvl() {
   vr4 = _vel_vfnmsbs_vvsvvl(vr1, vf1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_vfnmsbs_vvvvmvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vvvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vsvvmvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vsvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vvsvmvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vvsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_pvfnmsb_vvvvl() {
   // CHECK-LABEL: @test_pvfnmsb_vvvvl
@@ -3608,6 +5312,27 @@ test_pvfnmsb_vvsvvl() {
   vr4 = _vel_pvfnmsb_vvsvvl(vr1, v1, vr3, vr4, 256);
 }
 
+void __attribute__((noinline))
+test_pvfnmsb_vvvvMvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vvvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vsvvMvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vsvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vvsvMvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vvsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256);
+}
+
 void __attribute__((noinline))
 test_vrcpd_vvl() {
   // CHECK-LABEL: @test_vrcpd_vvl
@@ -3748,6 +5473,13 @@ test_vcvtwdsx_vvvl() {
   vr3 = _vel_vcvtwdsx_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vcvtwdsx_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwdsx_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdsx_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vcvtwdsxrz_vvl() {
   // CHECK-LABEL: @test_vcvtwdsxrz_vvl
@@ -3762,6 +5494,13 @@ test_vcvtwdsxrz_vvvl() {
   vr3 = _vel_vcvtwdsxrz_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vcvtwdsxrz_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwdsxrz_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdsxrz_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vcvtwdzx_vvl() {
   // CHECK-LABEL: @test_vcvtwdzx_vvl
@@ -3776,6 +5515,13 @@ test_vcvtwdzx_vvvl() {
   vr3 = _vel_vcvtwdzx_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vcvtwdzx_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwdzx_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdzx_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vcvtwdzxrz_vvl() {
   // CHECK-LABEL: @test_vcvtwdzxrz_vvl
@@ -3790,6 +5536,13 @@ test_vcvtwdzxrz_vvvl() {
   vr3 = _vel_vcvtwdzxrz_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vcvtwdzxrz_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwdzxrz_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdzxrz_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vcvtwssx_vvl() {
   // CHECK-LABEL: @test_vcvtwssx_vvl
@@ -3804,6 +5557,13 @@ test_vcvtwssx_vvvl() {
   vr3 = _vel_vcvtwssx_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vcvtwssx_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwssx_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwssx_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vcvtwssxrz_vvl() {
   // CHECK-LABEL: @test_vcvtwssxrz_vvl
@@ -3818,6 +5578,13 @@ test_vcvtwssxrz_vvvl() {
   vr3 = _vel_vcvtwssxrz_vvvl(vr1, vr2, 256);
 }
 
+void __attribute__((noinline))
+test_vcvtwssxrz_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwssxrz_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwssxrz_vvmvl(vr1, vm1, vr2, 256);
+}
+
 void __attribute__((noinline))
 test_vcvtwszx_vvl() {
   // CHECK-LABEL: @test_vcvtwszx_vvl
@@ -3826,178 +5593,2278 @@ test_vcvtwszx_vvl() {
 }
 
 void __attribute__((noinline))
-test_vcvtwszx_vvvl() {
-  // CHECK-LABEL: @test_vcvtwszx_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtwszx_vvvl(vr1, vr2, 256);
+test_vcvtwszx_vvvl() {
+  // CHECK-LABEL: @test_vcvtwszx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwszx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszx_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwszx_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwszx_vvmvl(vr1, vm1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszxrz_vvl() {
+  // CHECK-LABEL: @test_vcvtwszxrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwszxrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszxrz_vvvl() {
+  // CHECK-LABEL: @test_vcvtwszxrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwszxrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszxrz_vvmvl() {
+  // CHECK-LABEL: @test_vcvtwszxrz_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwszxrz_vvmvl(vr1, vm1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtws_vvl() {
+  // CHECK-LABEL: @test_pvcvtws_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvcvtws_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtws_vvvl() {
+  // CHECK-LABEL: @test_pvcvtws_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtws_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtws_vvMvl() {
+  // CHECK-LABEL: @test_pvcvtws_vvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtws_vvMvl(vr1, vm1_512, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtwsrz_vvl() {
+  // CHECK-LABEL: @test_pvcvtwsrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvcvtwsrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtwsrz_vvvl() {
+  // CHECK-LABEL: @test_pvcvtwsrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtwsrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtwsrz_vvMvl() {
+  // CHECK-LABEL: @test_pvcvtwsrz_vvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtwsrz_vvMvl(vr1, vm1_512, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtld_vvl() {
+  // CHECK-LABEL: @test_vcvtld_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtld_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtld_vvvl() {
+  // CHECK-LABEL: @test_vcvtld_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtld_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtld_vvmvl() {
+  // CHECK-LABEL: @test_vcvtld_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtld_vvmvl(vr1, vm1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtldrz_vvl() {
+  // CHECK-LABEL: @test_vcvtldrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtldrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtldrz_vvvl() {
+  // CHECK-LABEL: @test_vcvtldrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtldrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtldrz_vvmvl() {
+  // CHECK-LABEL: @test_vcvtldrz_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtldrz_vvmvl(vr1, vm1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdw_vvl() {
+  // CHECK-LABEL: @test_vcvtdw_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdw.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtdw_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdw_vvvl() {
+  // CHECK-LABEL: @test_vcvtdw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtdw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsw_vvl() {
+  // CHECK-LABEL: @test_vcvtsw_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsw.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtsw_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsw_vvvl() {
+  // CHECK-LABEL: @test_vcvtsw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtsw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtsw_vvl() {
+  // CHECK-LABEL: @test_pvcvtsw_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtsw.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvcvtsw_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtsw_vvvl() {
+  // CHECK-LABEL: @test_pvcvtsw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtsw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtsw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdl_vvl() {
+  // CHECK-LABEL: @test_vcvtdl_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdl.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtdl_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdl_vvvl() {
+  // CHECK-LABEL: @test_vcvtdl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtdl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtds_vvl() {
+  // CHECK-LABEL: @test_vcvtds_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtds.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtds_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtds_vvvl() {
+  // CHECK-LABEL: @test_vcvtds_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtds.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtds_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsd_vvl() {
+  // CHECK-LABEL: @test_vcvtsd_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsd.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtsd_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsd_vvvl() {
+  // CHECK-LABEL: @test_vcvtsd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtsd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmrg_vvvml() {
+  // CHECK-LABEL: @test_vmrg_vvvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vvvml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vmrg_vvvml(vr1, vr2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vmrg_vvvmvl() {
+  // CHECK-LABEL: @test_vmrg_vvvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmrg_vvvmvl(vr1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmrg_vsvml() {
+  // CHECK-LABEL: @test_vmrg_vsvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vsvml(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vmrg_vsvml(v1, vr2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vmrg_vsvmvl() {
+  // CHECK-LABEL: @test_vmrg_vsvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmrg_vsvmvl(v1, vr2, vm1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmrgw_vvvMl() {
+  // CHECK-LABEL: @test_vmrgw_vvvMl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vvvMl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vmrgw_vvvMl(vr1, vr2, vm1_512, 256);
+}
+
+void __attribute__((noinline))
+test_vmrgw_vvvMvl() {
+  // CHECK-LABEL: @test_vmrgw_vvvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmrgw_vvvMvl(vr1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmrgw_vsvMl() {
+  // CHECK-LABEL: @test_vmrgw_vsvMl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vsvMl(i32 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vmrgw_vsvMl(v1, vr2, vm1_512, 256);
+}
+
+void __attribute__((noinline))
+test_vmrgw_vsvMvl() {
+  // CHECK-LABEL: @test_vmrgw_vsvMvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vsvMvl(i32 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmrgw_vsvMvl(v1, vr2, vm1_512, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vshf_vvvsl() {
+  // CHECK-LABEL: @test_vshf_vvvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vshf.vvvsl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vshf_vvvsl(vr1, vr2, v1, 256);
+}
+
+void __attribute__((noinline))
+test_vshf_vvvsvl() {
+  // CHECK-LABEL: @test_vshf_vvvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vshf.vvvsvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vshf_vvvsvl(vr1, vr2, v1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcp_vvmvl() {
+  // CHECK-LABEL: @test_vcp_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcp.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcp_vvmvl(vr1, vm1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vex_vvmvl() {
+  // CHECK-LABEL: @test_vex_vvmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vex.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vex_vvmvl(vr1, vm1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmklat_ml(int vl) {
+  // CHECK-LABEL: @test_vfmklat_ml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklat.ml(i32 %{{.*}})
+  vm1 = _vel_vfmklat_ml(vl);
+}
+
+void __attribute__((noinline))
+test_vfmklaf_ml(int vl) {
+  // CHECK-LABEL: @test_vfmklaf_ml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklaf.ml(i32 %{{.*}})
+  vm1 = _vel_vfmklaf_ml(vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkat_Ml(int vl) {
+  // CHECK-LABEL: @test_pvfmkat_Ml
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkat.Ml(i32 %{{.*}})
+  vm1_512 = _vel_pvfmkat_Ml(vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkaf_Ml(int vl) {
+  // CHECK-LABEL: @test_pvfmkaf_Ml
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkaf.Ml(i32 %{{.*}})
+  vm1_512 = _vel_pvfmkaf_Ml(vl);
+}
+
+void __attribute__((noinline))
+test_vfmklgt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklgt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklgt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklgt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklgt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklgt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkllt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkllt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkllt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkllt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkllt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkllt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklne_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklne_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklne_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklne_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklne_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklne_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkleq_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkleq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkleq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkleq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkleq_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkleq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkleq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkleq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklge_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklge_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklle_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklle_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklle.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklle_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklle_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklle_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklle_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklnum_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklnum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklnum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklnum_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklnum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklnum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklgtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklgtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklgtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklgtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklgtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklgtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklnenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklnenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklnenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklnenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklnenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklnenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkleqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkleqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkleqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkleqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkleqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkleqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkleqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkleqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklgenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmklgenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklgenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmklgenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmklgenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmklgenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkllenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkllenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkllenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkllenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkllenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkllenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwgt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwgt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwgt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwgt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwgt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwgt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwlt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwlt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwlt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwlt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwlt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwlt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwne_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwne_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwne_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwne_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwne_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwne_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkweq_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkweq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkweq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkweq_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkweq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkweq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwge_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwge_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwle_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwle_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwle.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwle_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwle_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwle_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwle_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwnum_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwnum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwnum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwnum_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwnum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwnum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwgtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwgtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwgtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwgtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwgtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwgtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwnenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwnenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwnenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwnenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwnenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwnenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkweqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkweqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkweqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkweqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkweqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkweqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwgenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwgenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwgenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwgenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwgenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwgenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwlenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkwlenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwlenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkwlenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkwlenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkwlenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlogt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlogt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlogt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupgt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupgt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupgt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlogt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlogt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlogt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupgt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupgt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupgt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlolt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlolt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlolt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlolt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwuplt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwuplt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwuplt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlolt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlolt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlolt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlolt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwuplt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwuplt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwuplt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlone_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlone_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlone.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlone_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupne_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupne_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupne_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlone_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlone_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlone.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlone_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupne_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupne_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupne_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloeq_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloeq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloeq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupeq_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupeq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupeq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloeq_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloeq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloeq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupeq_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupeq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupeq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloge_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupge_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloge_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupge_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlole_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlole_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlole.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlole_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwuple_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwuple_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuple.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwuple_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlole_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlole_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlole.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlole_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwuple_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwuple_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuple.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwuple_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlonum_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlonum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlonum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupnum_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupnum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupnum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlonum_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlonum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlonum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupnum_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupnum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupnum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlonan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlonan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlonan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlonan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlonan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlonan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlogtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlogtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlogtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupgtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupgtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupgtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlogtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlogtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlogtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupgtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupgtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupgtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlonenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlonenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlonenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupnenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupnenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupnenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlonenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlonenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlonenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupnenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupnenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupnenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloeqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloeqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloeqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupeqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupeqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupeqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwloeqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwloeqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwloeqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupeqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupeqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupeqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlogenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlogenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlogenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupgenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupgenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupgenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlogenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlogenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlogenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwupgenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwupgenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwupgenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlolenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlolenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlolenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlolenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwuplenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwuplenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwuplenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlolenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlolenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlolenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwlolenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwuplenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkwuplenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkwuplenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwgt_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwgt_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwgt_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwgt_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwgt_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwgt_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlt_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlt_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwlt_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlt_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlt_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwlt_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwne_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwne_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwne.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwne_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwne_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwne_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwne.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwne_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkweq_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkweq_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweq.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkweq_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkweq_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkweq_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweq.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkweq_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwge_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwge_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwge.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwge_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwge_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwge_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwge.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwge_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwle_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwle_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwle.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwle_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwle_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwle_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwle.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwle_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwnum_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwnum_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnum.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwnum_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwnum_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwnum_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnum.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwnum_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwnan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwnan_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwgtnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwgtnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgtnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwgtnan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwgtnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwgtnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgtnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwgtnan_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwltnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwltnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwltnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwltnan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwltnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwltnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwltnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwltnan_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwnenan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwnenan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwnenan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwnenan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwnenan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwnenan_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkweqnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkweqnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweqnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkweqnan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkweqnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkweqnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweqnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkweqnan_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwgenan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwgenan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwgenan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwgenan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwgenan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwgenan_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlenan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlenan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwlenan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkwlenan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkwlenan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkwlenan_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdgt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdgt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdgt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdgt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdgt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdgt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdlt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdlt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdlt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdlt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdlt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdlt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdne_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdne_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdne_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdne_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdne_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdne_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdeq_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdeq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdeq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdeq_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdeq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdeq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdge_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdge_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdle_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdle_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdle.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdle_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdle_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdle_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdle_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdnum_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdnum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdnum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdnum_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdnum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdnum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdgtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdgtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdgtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdgtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdgtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdgtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdnenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdnenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdnenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdnenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdnenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdnenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdeqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdeqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdeqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdeqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdeqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdeqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdgenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdgenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdgenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdgenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdgenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdgenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdlenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkdlenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdlenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkdlenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkdlenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkdlenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksgt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksgt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksgt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksgt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksgt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksgt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkslt_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkslt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkslt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkslt_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkslt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkslt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksne_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksne_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksne_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksne_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksne_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksne_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkseq_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkseq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkseq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkseq_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkseq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkseq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksge_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksge_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksle_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksle_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksle.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksle_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksle_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksle_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksle_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksnum_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksnum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksnum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksnum_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksnum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksnum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksgtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksgtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksgtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksgtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksgtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksgtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksnenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksnenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksnenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksnenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksnenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksnenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkseqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkseqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkseqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkseqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkseqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkseqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksgenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmksgenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksgenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmksgenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmksgenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmksgenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkslenan_mvl(int vl) {
+  // CHECK-LABEL: @test_vfmkslenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkslenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_vfmkslenan_mvml(int vl) {
+  // CHECK-LABEL: @test_vfmkslenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_vfmkslenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslogt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslogt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslogt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupgt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupgt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupgt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslogt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslogt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslogt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupgt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupgt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupgt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslolt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslolt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslolt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksuplt_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksuplt_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksuplt_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslolt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslolt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslolt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksuplt_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksuplt_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksuplt_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslone_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslone_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslone.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslone_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupne_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupne_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupne_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslone_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslone_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslone.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslone_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupne_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupne_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupne_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloeq_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksloeq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloeq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupeq_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupeq_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupeq_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloeq_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksloeq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloeq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupeq_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupeq_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupeq_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloge_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksloge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupge_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupge_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupge_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloge_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksloge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupge_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupge_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupge_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslole_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslole_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslole.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslole_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksuple_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksuple_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuple.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksuple_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslole_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslole_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslole.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslole_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksuple_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksuple_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuple.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksuple_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslonum_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslonum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslonum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupnum_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupnum_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupnum_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslonum_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslonum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslonum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupnum_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupnum_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupnum_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslonan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslonan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslonan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslonan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslonan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslonan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslogtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslogtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslogtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupgtnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupgtnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupgtnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslogtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslogtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslogtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupgtnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupgtnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupgtnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksloltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupltnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupltnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupltnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksloltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupltnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupltnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupltnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslonenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslonenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslonenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupnenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupnenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupnenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslonenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslonenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslonenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupnenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupnenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupnenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloeqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksloeqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloeqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupeqnan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupeqnan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupeqnan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksloeqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksloeqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksloeqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupeqnan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupeqnan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupeqnan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslogenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslogenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslogenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupgenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksupgenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupgenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslogenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslogenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslogenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksupgenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksupgenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksupgenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslolenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslolenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslolenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksuplenan_mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksuplenan_mvl
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksuplenan_mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslolenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmkslolenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmkslolenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksuplenan_mvml(int vl) {
+  // CHECK-LABEL: @test_pvfmksuplenan_mvml
+  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
+  vm1 = _vel_pvfmksuplenan_mvml(vr1, vm2, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksgt_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksgt_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksgt_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmksgt_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksgt_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksgt_MvMl(vr1, vm2_512, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslt_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslt_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkslt_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtwszxrz_vvl() {
-  // CHECK-LABEL: @test_vcvtwszxrz_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtwszxrz_vvl(vr1, 256);
+test_pvfmkslt_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslt_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkslt_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtwszxrz_vvvl() {
-  // CHECK-LABEL: @test_vcvtwszxrz_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtwszxrz_vvvl(vr1, vr2, 256);
+test_pvfmksne_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksne_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksne.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksne_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_pvcvtws_vvl() {
-  // CHECK-LABEL: @test_pvcvtws_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_pvcvtws_vvl(vr1, 256);
+test_pvfmksne_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksne_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksne.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksne_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_pvcvtws_vvvl() {
-  // CHECK-LABEL: @test_pvcvtws_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_pvcvtws_vvvl(vr1, vr2, 256);
+test_pvfmkseq_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkseq_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseq.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkseq_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_pvcvtwsrz_vvl() {
-  // CHECK-LABEL: @test_pvcvtwsrz_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_pvcvtwsrz_vvl(vr1, 256);
+test_pvfmkseq_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkseq_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseq.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkseq_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_pvcvtwsrz_vvvl() {
-  // CHECK-LABEL: @test_pvcvtwsrz_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_pvcvtwsrz_vvvl(vr1, vr2, 256);
+test_pvfmksge_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksge_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksge.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksge_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtld_vvl() {
-  // CHECK-LABEL: @test_vcvtld_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtld_vvl(vr1, 256);
+test_pvfmksge_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksge_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksge.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksge_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtld_vvvl() {
-  // CHECK-LABEL: @test_vcvtld_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtld_vvvl(vr1, vr2, 256);
+test_pvfmksle_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksle_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksle.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksle_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtldrz_vvl() {
-  // CHECK-LABEL: @test_vcvtldrz_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtldrz_vvl(vr1, 256);
+test_pvfmksle_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksle_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksle.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksle_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtldrz_vvvl() {
-  // CHECK-LABEL: @test_vcvtldrz_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtldrz_vvvl(vr1, vr2, 256);
+test_pvfmksnum_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksnum_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnum.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksnum_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtdw_vvl() {
-  // CHECK-LABEL: @test_vcvtdw_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdw.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtdw_vvl(vr1, 256);
+test_pvfmksnum_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksnum_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnum.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksnum_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtdw_vvvl() {
-  // CHECK-LABEL: @test_vcvtdw_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtdw_vvvl(vr1, vr2, 256);
+test_pvfmksnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksnan_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtsw_vvl() {
-  // CHECK-LABEL: @test_vcvtsw_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsw.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtsw_vvl(vr1, 256);
+test_pvfmksnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksnan_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtsw_vvvl() {
-  // CHECK-LABEL: @test_vcvtsw_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtsw_vvvl(vr1, vr2, 256);
+test_pvfmksgtnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksgtnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgtnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksgtnan_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_pvcvtsw_vvl() {
-  // CHECK-LABEL: @test_pvcvtsw_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtsw.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_pvcvtsw_vvl(vr1, 256);
+test_pvfmksgtnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksgtnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgtnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksgtnan_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_pvcvtsw_vvvl() {
-  // CHECK-LABEL: @test_pvcvtsw_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtsw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_pvcvtsw_vvvl(vr1, vr2, 256);
+test_pvfmksltnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksltnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksltnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksltnan_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtdl_vvl() {
-  // CHECK-LABEL: @test_vcvtdl_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdl.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtdl_vvl(vr1, 256);
+test_pvfmksltnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksltnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksltnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksltnan_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtdl_vvvl() {
-  // CHECK-LABEL: @test_vcvtdl_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtdl_vvvl(vr1, vr2, 256);
+test_pvfmksnenan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksnenan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksnenan_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtds_vvl() {
-  // CHECK-LABEL: @test_vcvtds_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtds.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtds_vvl(vr1, 256);
+test_pvfmksnenan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksnenan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksnenan_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtds_vvvl() {
-  // CHECK-LABEL: @test_vcvtds_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtds.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtds_vvvl(vr1, vr2, 256);
+test_pvfmkseqnan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkseqnan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseqnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkseqnan_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtsd_vvl() {
-  // CHECK-LABEL: @test_vcvtsd_vvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsd.vvl(<256 x double> %{{.*}}, i32 256)
-  vr2 = _vel_vcvtsd_vvl(vr1, 256);
+test_pvfmkseqnan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkseqnan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseqnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkseqnan_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vcvtsd_vvvl() {
-  // CHECK-LABEL: @test_vcvtsd_vvvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vcvtsd_vvvl(vr1, vr2, 256);
+test_pvfmksgenan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmksgenan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksgenan_Mvl(vr1, vl);
 }
 
 void __attribute__((noinline))
-test_vshf_vvvsl() {
-  // CHECK-LABEL: @test_vshf_vvvsl
-  // CHECK: call <256 x double> @llvm.ve.vl.vshf.vvvsl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
-  vr3 = _vel_vshf_vvvsl(vr1, vr2, v1, 256);
+test_pvfmksgenan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmksgenan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmksgenan_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
-test_vshf_vvvsvl() {
-  // CHECK-LABEL: @test_vshf_vvvsvl
-  // CHECK: call <256 x double> @llvm.ve.vl.vshf.vvvsvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
-  vr3 = _vel_vshf_vvvsvl(vr1, vr2, v1, vr3, 256);
+test_pvfmkslenan_Mvl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslenan_Mvl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkslenan_Mvl(vr1, vl);
+}
+
+void __attribute__((noinline))
+test_pvfmkslenan_MvMl(int vl) {
+  // CHECK-LABEL: @test_pvfmkslenan_MvMl
+  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
+  vm1_512 = _vel_pvfmkslenan_MvMl(vr1, vm2_512, vl);
 }
 
 void __attribute__((noinline))
@@ -4007,6 +7874,13 @@ test_vsumwsx_vvl() {
   vr2 = _vel_vsumwsx_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vsumwsx_vvml() {
+  // CHECK-LABEL: @test_vsumwsx_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vsumwsx.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr2 = _vel_vsumwsx_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vsumwzx_vvl() {
   // CHECK-LABEL: @test_vsumwzx_vvl
@@ -4014,6 +7888,13 @@ test_vsumwzx_vvl() {
   vr2 = _vel_vsumwzx_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vsumwzx_vvml() {
+  // CHECK-LABEL: @test_vsumwzx_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vsumwzx.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr2 = _vel_vsumwzx_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vsuml_vvl() {
   // CHECK-LABEL: @test_vsuml_vvl
@@ -4021,6 +7902,13 @@ test_vsuml_vvl() {
   vr2 = _vel_vsuml_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vsuml_vvml() {
+  // CHECK-LABEL: @test_vsuml_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vsuml.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr2 = _vel_vsuml_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vfsumd_vvl() {
   // CHECK-LABEL: @test_vfsumd_vvl
@@ -4028,6 +7916,13 @@ test_vfsumd_vvl() {
   vr2 = _vel_vfsumd_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vfsumd_vvml() {
+  // CHECK-LABEL: @test_vfsumd_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsumd.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr2 = _vel_vfsumd_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vfsums_vvl() {
   // CHECK-LABEL: @test_vfsums_vvl
@@ -4035,6 +7930,13 @@ test_vfsums_vvl() {
   vr2 = _vel_vfsums_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vfsums_vvml() {
+  // CHECK-LABEL: @test_vfsums_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsums.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr2 = _vel_vfsums_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vrmaxswfstsx_vvl() {
   // CHECK-LABEL: @test_vrmaxswfstsx_vvl
@@ -4322,6 +8224,13 @@ test_vrand_vvl() {
   vr3 = _vel_vrand_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vrand_vvml() {
+  // CHECK-LABEL: @test_vrand_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vrand.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vrand_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vror_vvl() {
   // CHECK-LABEL: @test_vror_vvl
@@ -4329,6 +8238,13 @@ test_vror_vvl() {
   vr3 = _vel_vror_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vror_vvml() {
+  // CHECK-LABEL: @test_vror_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vror.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vror_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vrxor_vvl() {
   // CHECK-LABEL: @test_vrxor_vvl
@@ -4336,6 +8252,13 @@ test_vrxor_vvl() {
   vr3 = _vel_vrxor_vvl(vr1, 256);
 }
 
+void __attribute__((noinline))
+test_vrxor_vvml() {
+  // CHECK-LABEL: @test_vrxor_vvml
+  // CHECK: call <256 x double> @llvm.ve.vl.vrxor.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vrxor_vvml(vr1, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vgt_vvssl() {
   // CHECK-LABEL: @test_vgt_vvssl
@@ -4350,6 +8273,20 @@ test_vgt_vvssvl() {
   vr3 = _vel_vgt_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgt_vvssml() {
+  // CHECK-LABEL: @test_vgt_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgt.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgt_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgt_vvssmvl() {
+  // CHECK-LABEL: @test_vgt_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgt.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgt_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vgtnc_vvssl() {
   // CHECK-LABEL: @test_vgtnc_vvssl
@@ -4364,6 +8301,20 @@ test_vgtnc_vvssvl() {
   vr3 = _vel_vgtnc_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgtnc_vvssml() {
+  // CHECK-LABEL: @test_vgtnc_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtnc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgtnc_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgtnc_vvssmvl() {
+  // CHECK-LABEL: @test_vgtnc_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtnc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtnc_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vgtu_vvssl() {
   // CHECK-LABEL: @test_vgtu_vvssl
@@ -4378,6 +8329,20 @@ test_vgtu_vvssvl() {
   vr3 = _vel_vgtu_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgtu_vvssml() {
+  // CHECK-LABEL: @test_vgtu_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtu.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgtu_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgtu_vvssmvl() {
+  // CHECK-LABEL: @test_vgtu_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtu.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtu_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vgtunc_vvssl() {
   // CHECK-LABEL: @test_vgtunc_vvssl
@@ -4392,6 +8357,20 @@ test_vgtunc_vvssvl() {
   vr3 = _vel_vgtunc_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgtunc_vvssml() {
+  // CHECK-LABEL: @test_vgtunc_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtunc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgtunc_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgtunc_vvssmvl() {
+  // CHECK-LABEL: @test_vgtunc_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtunc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtunc_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vgtlsx_vvssl() {
   // CHECK-LABEL: @test_vgtlsx_vvssl
@@ -4406,6 +8385,20 @@ test_vgtlsx_vvssvl() {
   vr3 = _vel_vgtlsx_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgtlsx_vvssml() {
+  // CHECK-LABEL: @test_vgtlsx_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsx.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsx_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlsx_vvssmvl() {
+  // CHECK-LABEL: @test_vgtlsx_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsx.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsx_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vgtlsxnc_vvssl() {
   // CHECK-LABEL: @test_vgtlsxnc_vvssl
@@ -4420,6 +8413,20 @@ test_vgtlsxnc_vvssvl() {
   vr3 = _vel_vgtlsxnc_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgtlsxnc_vvssml() {
+  // CHECK-LABEL: @test_vgtlsxnc_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsxnc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsxnc_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlsxnc_vvssmvl() {
+  // CHECK-LABEL: @test_vgtlsxnc_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsxnc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsxnc_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vgtlzx_vvssl() {
   // CHECK-LABEL: @test_vgtlzx_vvssl
@@ -4434,6 +8441,20 @@ test_vgtlzx_vvssvl() {
   vr3 = _vel_vgtlzx_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgtlzx_vvssml() {
+  // CHECK-LABEL: @test_vgtlzx_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzx.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzx_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlzx_vvssmvl() {
+  // CHECK-LABEL: @test_vgtlzx_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzx.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzx_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vgtlzxnc_vvssl() {
   // CHECK-LABEL: @test_vgtlzxnc_vvssl
@@ -4448,6 +8469,20 @@ test_vgtlzxnc_vvssvl() {
   vr3 = _vel_vgtlzxnc_vvssvl(vr1, v1, v2, vr3, 256);
 }
 
+void __attribute__((noinline))
+test_vgtlzxnc_vvssml() {
+  // CHECK-LABEL: @test_vgtlzxnc_vvssml
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzxnc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzxnc_vvssml(vr1, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlzxnc_vvssmvl() {
+  // CHECK-LABEL: @test_vgtlzxnc_vvssmvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzxnc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzxnc_vvssmvl(vr1, v1, v2, vm1, vr3, 256);
+}
+
 void __attribute__((noinline))
 test_vsc_vvssl() {
   // CHECK-LABEL: @test_vsc_vvssl
@@ -4455,6 +8490,13 @@ test_vsc_vvssl() {
   _vel_vsc_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vsc_vvssml() {
+  // CHECK-LABEL: @test_vsc_vvssml
+  // CHECK: call void @llvm.ve.vl.vsc.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vsc_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscnc_vvssl() {
   // CHECK-LABEL: @test_vscnc_vvssl
@@ -4462,6 +8504,13 @@ test_vscnc_vvssl() {
   _vel_vscnc_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscnc_vvssml() {
+  // CHECK-LABEL: @test_vscnc_vvssml
+  // CHECK: call void @llvm.ve.vl.vscnc.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscnc_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscot_vvssl() {
   // CHECK-LABEL: @test_vscot_vvssl
@@ -4469,6 +8518,13 @@ test_vscot_vvssl() {
   _vel_vscot_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscot_vvssml() {
+  // CHECK-LABEL: @test_vscot_vvssml
+  // CHECK: call void @llvm.ve.vl.vscot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscot_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscncot_vvssl() {
   // CHECK-LABEL: @test_vscncot_vvssl
@@ -4476,6 +8532,13 @@ test_vscncot_vvssl() {
   _vel_vscncot_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscncot_vvssml() {
+  // CHECK-LABEL: @test_vscncot_vvssml
+  // CHECK: call void @llvm.ve.vl.vscncot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscncot_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscu_vvssl() {
   // CHECK-LABEL: @test_vscu_vvssl
@@ -4483,6 +8546,13 @@ test_vscu_vvssl() {
   _vel_vscu_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscu_vvssml() {
+  // CHECK-LABEL: @test_vscu_vvssml
+  // CHECK: call void @llvm.ve.vl.vscu.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscu_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscunc_vvssl() {
   // CHECK-LABEL: @test_vscunc_vvssl
@@ -4490,6 +8560,13 @@ test_vscunc_vvssl() {
   _vel_vscunc_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscunc_vvssml() {
+  // CHECK-LABEL: @test_vscunc_vvssml
+  // CHECK: call void @llvm.ve.vl.vscunc.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscunc_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscuot_vvssl() {
   // CHECK-LABEL: @test_vscuot_vvssl
@@ -4497,6 +8574,13 @@ test_vscuot_vvssl() {
   _vel_vscuot_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscuot_vvssml() {
+  // CHECK-LABEL: @test_vscuot_vvssml
+  // CHECK: call void @llvm.ve.vl.vscuot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscuot_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscuncot_vvssl() {
   // CHECK-LABEL: @test_vscuncot_vvssl
@@ -4504,6 +8588,13 @@ test_vscuncot_vvssl() {
   _vel_vscuncot_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscuncot_vvssml() {
+  // CHECK-LABEL: @test_vscuncot_vvssml
+  // CHECK: call void @llvm.ve.vl.vscuncot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscuncot_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vscl_vvssl() {
   // CHECK-LABEL: @test_vscl_vvssl
@@ -4511,6 +8602,13 @@ test_vscl_vvssl() {
   _vel_vscl_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vscl_vvssml() {
+  // CHECK-LABEL: @test_vscl_vvssml
+  // CHECK: call void @llvm.ve.vl.vscl.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vscl_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vsclnc_vvssl() {
   // CHECK-LABEL: @test_vsclnc_vvssl
@@ -4518,6 +8616,13 @@ test_vsclnc_vvssl() {
   _vel_vsclnc_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vsclnc_vvssml() {
+  // CHECK-LABEL: @test_vsclnc_vvssml
+  // CHECK: call void @llvm.ve.vl.vsclnc.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vsclnc_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vsclot_vvssl() {
   // CHECK-LABEL: @test_vsclot_vvssl
@@ -4525,6 +8630,13 @@ test_vsclot_vvssl() {
   _vel_vsclot_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vsclot_vvssml() {
+  // CHECK-LABEL: @test_vsclot_vvssml
+  // CHECK: call void @llvm.ve.vl.vsclot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vsclot_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
 void __attribute__((noinline))
 test_vsclncot_vvssl() {
   // CHECK-LABEL: @test_vsclncot_vvssl
@@ -4532,6 +8644,118 @@ test_vsclncot_vvssl() {
   _vel_vsclncot_vvssl(vr1, vr2, v1, v2, 256);
 }
 
+void __attribute__((noinline))
+test_vsclncot_vvssml() {
+  // CHECK-LABEL: @test_vsclncot_vvssml
+  // CHECK: call void @llvm.ve.vl.vsclncot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256)
+  _vel_vsclncot_vvssml(vr1, vr2, v1, v2, vm1, 256);
+}
+
+void __attribute__((noinline))
+test_andm_mmm() {
+  // CHECK-LABEL: @test_andm_mmm
+  // CHECK: call <256 x i1> @llvm.ve.vl.andm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}})
+  vm3 = _vel_andm_mmm(vm1, vm2);
+}
+
+void __attribute__((noinline))
+test_andm_MMM() {
+  // CHECK-LABEL: @test_andm_MMM
+  // CHECK: call <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}})
+  vm3_512 = _vel_andm_MMM(vm1_512, vm2_512);
+}
+
+void __attribute__((noinline))
+test_orm_mmm() {
+  // CHECK-LABEL: @test_orm_mmm
+  // CHECK: call <256 x i1> @llvm.ve.vl.orm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}})
+  vm3 = _vel_orm_mmm(vm1, vm2);
+}
+
+void __attribute__((noinline))
+test_orm_MMM() {
+  // CHECK-LABEL: @test_orm_MMM
+  // CHECK: call <512 x i1> @llvm.ve.vl.orm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}})
+  vm3_512 = _vel_orm_MMM(vm1_512, vm2_512);
+}
+
+void __attribute__((noinline))
+test_xorm_mmm() {
+  // CHECK-LABEL: @test_xorm_mmm
+  // CHECK: call <256 x i1> @llvm.ve.vl.xorm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}})
+  vm3 = _vel_xorm_mmm(vm1, vm2);
+}
+
+void __attribute__((noinline))
+test_xorm_MMM() {
+  // CHECK-LABEL: @test_xorm_MMM
+  // CHECK: call <512 x i1> @llvm.ve.vl.xorm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}})
+  vm3_512 = _vel_xorm_MMM(vm1_512, vm2_512);
+}
+
+void __attribute__((noinline))
+test_eqvm_mmm() {
+  // CHECK-LABEL: @test_eqvm_mmm
+  // CHECK: call <256 x i1> @llvm.ve.vl.eqvm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}})
+  vm3 = _vel_eqvm_mmm(vm1, vm2);
+}
+
+void __attribute__((noinline))
+test_eqvm_MMM() {
+  // CHECK-LABEL: @test_eqvm_MMM
+  // CHECK: call <512 x i1> @llvm.ve.vl.eqvm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}})
+  vm3_512 = _vel_eqvm_MMM(vm1_512, vm2_512);
+}
+
+void __attribute__((noinline))
+test_nndm_mmm() {
+  // CHECK-LABEL: @test_nndm_mmm
+  // CHECK: call <256 x i1> @llvm.ve.vl.nndm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}})
+  vm3 = _vel_nndm_mmm(vm1, vm2);
+}
+
+void __attribute__((noinline))
+test_nndm_MMM() {
+  // CHECK-LABEL: @test_nndm_MMM
+  // CHECK: call <512 x i1> @llvm.ve.vl.nndm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}})
+  vm3_512 = _vel_nndm_MMM(vm1_512, vm2_512);
+}
+
+void __attribute__((noinline))
+test_negm_mm() {
+  // CHECK-LABEL: @test_negm_mm
+  // CHECK: call <256 x i1> @llvm.ve.vl.negm.mm(<256 x i1> %{{.*}})
+  vm2 = _vel_negm_mm(vm1);
+}
+
+void __attribute__((noinline))
+test_negm_MM() {
+  // CHECK-LABEL: @test_negm_MM
+  // CHECK: call <512 x i1> @llvm.ve.vl.negm.MM(<512 x i1> %{{.*}})
+  vm2_512 = _vel_negm_MM(vm1_512);
+}
+
+void __attribute__((noinline))
+test_pcvm_sml() {
+  // CHECK-LABEL: @test_pcvm_sml
+  // CHECK: call i64 @llvm.ve.vl.pcvm.sml(<256 x i1> %{{.*}}, i32 256)
+  v1 = _vel_pcvm_sml(vm1, 256);
+}
+
+void __attribute__((noinline))
+test_lzvm_sml() {
+  // CHECK-LABEL: @test_lzvm_sml
+  // CHECK: call i64 @llvm.ve.vl.lzvm.sml(<256 x i1> %{{.*}}, i32 256)
+  v1 = _vel_lzvm_sml(vm1, 256);
+}
+
+void __attribute__((noinline))
+test_tovm_sml() {
+  // CHECK-LABEL: @test_tovm_sml
+  // CHECK: call i64 @llvm.ve.vl.tovm.sml(<256 x i1> %{{.*}}, i32 256)
+  v1 = _vel_tovm_sml(vm1, 256);
+}
+
 void __attribute__((noinline))
 test_lcr_sss() {
   // CHECK-LABEL: @test_lcr_sss


        


More information about the cfe-commits mailing list