[clang] c2f62ab - [Clang][VE] Add the rest of intrinsics to clang

Kazushi Marukawa via cfe-commits <cfe-commits at lists.llvm.org>
Wed Mar 16 08:18:55 PDT 2022


Author: Kazushi (Jam) Marukawa
Date: 2022-03-17T00:17:21+09:00
New Revision: c2f62ab84b5f19bbe5a670dd5c2a2d19f15e83b9

URL: https://github.com/llvm/llvm-project/commit/c2f62ab84b5f19bbe5a670dd5c2a2d19f15e83b9
DIFF: https://github.com/llvm/llvm-project/commit/c2f62ab84b5f19bbe5a670dd5c2a2d19f15e83b9.diff

LOG: [Clang][VE] Add the rest of intrinsics to clang

Add the rest of the intrinsics to clang, except for those that use vector
mask registers.

Reviewed By: simoll

Differential Revision: https://reviews.llvm.org/D121586
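
For context, each builtin added below surfaces to users as a matching
_vel_* name in velintrin_gen.h. A minimal usage sketch (function name
illustrative), assuming __vr is the 256 x double vector register type
that velintrin.h defines:

    #include <velintrin.h>

    /* Element-wise double add over the first vl elements (vl <= 256). */
    __vr add_f64(__vr a, __vr b, int vl) {
      return _vel_vfaddd_vvvl(a, b, vl);
    }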

Added: 
    clang/lib/Headers/velintrin_approx.h

Modified: 
    clang/include/clang/Basic/BuiltinsVEVL.gen.def
    clang/lib/Headers/CMakeLists.txt
    clang/lib/Headers/velintrin.h
    clang/lib/Headers/velintrin_gen.h
    clang/test/CodeGen/VE/ve-velintrin.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsVEVL.gen.def b/clang/include/clang/Basic/BuiltinsVEVL.gen.def
index d90e35e917451..9960c89b53001 100644
--- a/clang/include/clang/Basic/BuiltinsVEVL.gen.def
+++ b/clang/include/clang/Basic/BuiltinsVEVL.gen.def
@@ -30,3 +30,625 @@ BUILTIN(__builtin_ve_vl_vldl2dzx_vssl, "V256dLUivC*Ui", "n")
 BUILTIN(__builtin_ve_vl_vldl2dzx_vssvl, "V256dLUivC*V256dUi", "n")
 BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssl, "V256dLUivC*Ui", "n")
 BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vst_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstunc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstuot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstuncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_pfchv_ssl, "vLivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_pfchvnc_ssl, "vLivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_lsv_vvss, "V256dV256dUiLUi", "n")
+BUILTIN(__builtin_ve_vl_lvsl_svs, "LUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_lvsd_svs, "dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_lvss_svs, "fV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdd_vsl, "V256ddUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdl_vsl, "V256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrds_vsl, "V256dfUi", "n")
+BUILTIN(__builtin_ve_vl_vbrds_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdw_vsl, "V256diUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdw_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrd_vsl, "V256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrd_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmv_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmv_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvsl, "V256dV256dUiUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvsvl, "V256dV256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvsl, "V256dV256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzup_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vpcnt_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vpcnt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntup_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcnt_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcnt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrv_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvup_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrv_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vseq_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vseq_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseqlo_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseqlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsequp_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsequp_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseq_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseq_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvsl, "V256dV256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvsl, "V256dV256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsfa_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsfa_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrtd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrtd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrts_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrts_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcpd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcpd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcps_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcps_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrcp_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrcp_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrts_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrts_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrt_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtdnex_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtdnex_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtsnex_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtsnex_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtws_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtws_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtld_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtld_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtldrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtldrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdw_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsw_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtsw_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtsw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdl_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtds_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtds_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vshf_vvvsl, "V256dV256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vshf_vvvsvl, "V256dV256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsuml_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsumd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsums_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxslfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxslfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxsllst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxsllst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminslfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminslfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminsllst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminsllst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdlst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdlst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxsfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxsfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxslst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxslst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindlst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindlst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminsfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminsfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminslst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminslst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrand_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vror_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrxor_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscnc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscu_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscunc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscuot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscuncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscl_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclnc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_lcr_sss, "LUiLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_scr_sss, "vLUiLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_tscr_ssss, "LUiLUiLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_fidcr_sss, "LUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_fencei, "v", "n")
+BUILTIN(__builtin_ve_vl_fencem_s, "vUi", "n")
+BUILTIN(__builtin_ve_vl_fencec_s, "vUi", "n")
+BUILTIN(__builtin_ve_vl_svob, "v", "n")
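
The prototype strings above follow Clang's Builtins.def encoding: the first
type is the return type and the rest are parameters, with "V256d" a vector
of 256 doubles, "Ui" unsigned int, "LUi" unsigned long, "vC*" a pointer to
const void, and "v" void; the trailing "n" marks the builtin nothrow. So
__builtin_ve_vl_vfaddd_vvvl ("V256dV256dV256dUi") returns a 256 x double
vector and takes two such vectors plus an unsigned int vector length. A
hedged sketch calling it directly, assuming this 2048-byte vector typedef
matches the __vr type velintrin.h uses:

    typedef double __vr __attribute__((__vector_size__(2048))); /* 256 x double */

    __vr add_raw(__vr a, __vr b) {
      /* "V256dV256dV256dUi": V256d result; V256d, V256d, Ui arguments. */
      return __builtin_ve_vl_vfaddd_vvvl(a, b, 256);
    }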

diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index 447b1400b58f2..e218a6a46a147 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -147,6 +147,7 @@ set(files
   xtestintrin.h
   velintrin.h
   velintrin_gen.h
+  velintrin_approx.h
   )
 
 set(cuda_wrapper_files

diff --git a/clang/lib/Headers/velintrin.h b/clang/lib/Headers/velintrin.h
index 5988384b87863..c12054a9e965b 100644
--- a/clang/lib/Headers/velintrin.h
+++ b/clang/lib/Headers/velintrin.h
@@ -32,7 +32,43 @@ typedef bool __vm512 __attribute__((ext_vector_type(512)));
 #endif
 #endif
 
+enum VShuffleCodes {
+  VE_VSHUFFLE_YUYU = 0,
+  VE_VSHUFFLE_YUYL = 1,
+  VE_VSHUFFLE_YUZU = 2,
+  VE_VSHUFFLE_YUZL = 3,
+  VE_VSHUFFLE_YLYU = 4,
+  VE_VSHUFFLE_YLYL = 5,
+  VE_VSHUFFLE_YLZU = 6,
+  VE_VSHUFFLE_YLZL = 7,
+  VE_VSHUFFLE_ZUYU = 8,
+  VE_VSHUFFLE_ZUYL = 9,
+  VE_VSHUFFLE_ZUZU = 10,
+  VE_VSHUFFLE_ZUZL = 11,
+  VE_VSHUFFLE_ZLYU = 12,
+  VE_VSHUFFLE_ZLYL = 13,
+  VE_VSHUFFLE_ZLZU = 14,
+  VE_VSHUFFLE_ZLZL = 15,
+};
+
 // Use generated intrinsic name definitions
 #include <velintrin_gen.h>
 
+// Use helper functions
+#include <velintrin_approx.h>
+
+// pack
+
+#define _vel_pack_f32p __builtin_ve_vl_pack_f32p
+#define _vel_pack_f32a __builtin_ve_vl_pack_f32a
+
+static inline unsigned long int _vel_pack_i32(unsigned int a, unsigned int b) {
+  return (((unsigned long int)a) << 32) | b;
+}
+
+#define _vel_extract_vm512u(vm) __builtin_ve_vl_extract_vm512u(vm)
+#define _vel_extract_vm512l(vm) __builtin_ve_vl_extract_vm512l(vm)
+#define _vel_insert_vm512u(vm512, vm) __builtin_ve_vl_insert_vm512u(vm512, vm)
+#define _vel_insert_vm512l(vm512, vm) __builtin_ve_vl_insert_vm512l(vm512, vm)
+
 #endif
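
Two of the velintrin.h additions compose naturally: packed (pv*) operations
treat each 64-bit element as two independent 32-bit lanes, so the new
_vel_pack_i32 helper builds the packed scalar operand they expect, and the
VShuffleCodes enumerators name the lane selections accepted by
_vel_vshf_vvvsl. A minimal sketch (function name illustrative), assuming
__vr from velintrin.h:

    #include <velintrin.h>

    __vr broadcast_then_shuffle(int vl) {
      /* Pack hi=1, lo=2 into one 64-bit scalar: 0x0000000100000002. */
      unsigned long s = _vel_pack_i32(1u, 2u);
      /* Broadcast the packed pair into every 64-bit element. */
      __vr v = _vel_pvbrd_vsl(s, vl);
      /* Select 32-bit lanes from (v, v) by the named YLYU shuffle code. */
      return _vel_vshf_vvvsl(v, v, VE_VSHUFFLE_YLYU, vl);
    }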

diff --git a/clang/lib/Headers/velintrin_approx.h b/clang/lib/Headers/velintrin_approx.h
new file mode 100644
index 0000000000000..89d270fef3c7d
--- /dev/null
+++ b/clang/lib/Headers/velintrin_approx.h
@@ -0,0 +1,120 @@
+/*===---- velintrin_approx.h - VEL intrinsics helper for VE ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_APPROX_H__
+#define __VEL_INTRIN_APPROX_H__
+
+static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {
+  float s0;
+  __vr v2, v3, v4, v5;
+  v5 = _vel_vrcps_vvl(v1, l);
+  s0 = 1.0;
+  v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l);
+  v3 = _vel_vfmads_vvvvl(v5, v5, v4, l);
+  v2 = _vel_vfmuls_vvvl(v0, v3, l);
+  v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+  v2 = _vel_vfmads_vvvvl(v2, v5, v4, l);
+  v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+  v0 = _vel_vfmads_vvvvl(v2, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {
+  float s0;
+  __vr v2, v3, v4, v5;
+  v5 = _vel_pvrcp_vvl(v1, l);
+  s0 = 1.0;
+  v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l);
+  v3 = _vel_pvfmad_vvvvl(v5, v5, v4, l);
+  v2 = _vel_pvfmul_vvvl(v0, v3, l);
+  v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+  v2 = _vel_pvfmad_vvvvl(v2, v5, v4, l);
+  v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+  v0 = _vel_pvfmad_vvvvl(v2, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vsvl(float s0, __vr v0, int l) {
+  float s1;
+  __vr v1, v2, v3, v4;
+  v4 = _vel_vrcps_vvl(v0, l);
+  s1 = 1.0;
+  v2 = _vel_vfnmsbs_vsvvl(s1, v0, v4, l);
+  v2 = _vel_vfmads_vvvvl(v4, v4, v2, l);
+  v1 = _vel_vfmuls_vsvl(s0, v2, l);
+  v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+  v1 = _vel_vfmads_vvvvl(v1, v4, v3, l);
+  v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+  v0 = _vel_vfmads_vvvvl(v1, v2, v3, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vvsl(__vr v0, float s0, int l) {
+  float s1;
+  __vr v1, v2;
+  s1 = 1.0f / s0;
+  v1 = _vel_vfmuls_vsvl(s1, v0, l);
+  v2 = _vel_vfnmsbs_vvsvl(v0, s0, v1, l);
+  v0 = _vel_vfmads_vvsvl(v1, s1, v2, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfdivd_vsvl(double s0, __vr v0, int l) {
+  __vr v1, v2, v3;
+  v2 = _vel_vrcpd_vvl(v0, l);
+  double s1 = 1.0;
+  v3 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+  v1 = _vel_vfmadd_vvvvl(v2, v2, v1, l);
+  v1 = _vel_vaddul_vsvl(1, v1, l);
+  v3 = _vel_vfnmsbd_vsvvl(s1, v0, v1, l);
+  v3 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  v1 = _vel_vfmuld_vsvl(s0, v3, l);
+  v0 = _vel_vfnmsbd_vsvvl(s0, v1, v0, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v3, v0, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfsqrtd_vvl(__vr v0, int l) {
+  double s0, s1;
+  __vr v1, v2, v3;
+  v2 = _vel_vrsqrtdnex_vvl(v0, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  s0 = 1.0;
+  s1 = 0.5;
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  return v0;
+}
+
+static inline __vr _vel_approx_vfsqrts_vvl(__vr v0, int l) {
+  float s0, s1;
+  __vr v1, v2, v3;
+  v0 = _vel_vcvtds_vvl(v0, l);
+  v2 = _vel_vrsqrtdnex_vvl(v0, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  s0 = 1.0;
+  s1 = 0.5;
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+  v1 = _vel_vfmuld_vvvl(v0, v2, l);
+  v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+  v3 = _vel_vfmuld_vsvl(s1, v3, l);
+  v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+  v0 = _vel_vcvtsd_vvl(v0, l);
+  return v0;
+}
+
+#endif
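
The helpers above share one pattern: start from a hardware estimate
(_vel_vrcps_vvl, _vel_vrsqrtdnex_vvl, and friends) and refine it with
Newton-Raphson steps expressed through the fused multiply intrinsics,
e.g. r' = r + r*(1 - b*r) when converging on 1/b. The results are
therefore close to, but not necessarily bit-identical with, the exact
vfdiv/vfsqrt instructions. A minimal usage sketch (function name
illustrative), assuming __vr from velintrin.h:

    #include <velintrin.h>

    /* Approximate element-wise num/den over the first vl elements. */
    __vr fast_divs(__vr num, __vr den, int vl) {
      return _vel_approx_vfdivs_vvvl(num, den, vl);
    }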

diff --git a/clang/lib/Headers/velintrin_gen.h b/clang/lib/Headers/velintrin_gen.h
index 23d23573a7fec..845c0da2ffa22 100644
--- a/clang/lib/Headers/velintrin_gen.h
+++ b/clang/lib/Headers/velintrin_gen.h
@@ -30,3 +30,1228 @@
 #define _vel_vldl2dzx_vssvl __builtin_ve_vl_vldl2dzx_vssvl
 #define _vel_vldl2dzxnc_vssl __builtin_ve_vl_vldl2dzxnc_vssl
 #define _vel_vldl2dzxnc_vssvl __builtin_ve_vl_vldl2dzxnc_vssvl
+#define _vel_vst_vssl __builtin_ve_vl_vst_vssl
+#define _vel_vst_vssml __builtin_ve_vl_vst_vssml
+#define _vel_vstnc_vssl __builtin_ve_vl_vstnc_vssl
+#define _vel_vstnc_vssml __builtin_ve_vl_vstnc_vssml
+#define _vel_vstot_vssl __builtin_ve_vl_vstot_vssl
+#define _vel_vstot_vssml __builtin_ve_vl_vstot_vssml
+#define _vel_vstncot_vssl __builtin_ve_vl_vstncot_vssl
+#define _vel_vstncot_vssml __builtin_ve_vl_vstncot_vssml
+#define _vel_vstu_vssl __builtin_ve_vl_vstu_vssl
+#define _vel_vstu_vssml __builtin_ve_vl_vstu_vssml
+#define _vel_vstunc_vssl __builtin_ve_vl_vstunc_vssl
+#define _vel_vstunc_vssml __builtin_ve_vl_vstunc_vssml
+#define _vel_vstuot_vssl __builtin_ve_vl_vstuot_vssl
+#define _vel_vstuot_vssml __builtin_ve_vl_vstuot_vssml
+#define _vel_vstuncot_vssl __builtin_ve_vl_vstuncot_vssl
+#define _vel_vstuncot_vssml __builtin_ve_vl_vstuncot_vssml
+#define _vel_vstl_vssl __builtin_ve_vl_vstl_vssl
+#define _vel_vstl_vssml __builtin_ve_vl_vstl_vssml
+#define _vel_vstlnc_vssl __builtin_ve_vl_vstlnc_vssl
+#define _vel_vstlnc_vssml __builtin_ve_vl_vstlnc_vssml
+#define _vel_vstlot_vssl __builtin_ve_vl_vstlot_vssl
+#define _vel_vstlot_vssml __builtin_ve_vl_vstlot_vssml
+#define _vel_vstlncot_vssl __builtin_ve_vl_vstlncot_vssl
+#define _vel_vstlncot_vssml __builtin_ve_vl_vstlncot_vssml
+#define _vel_vst2d_vssl __builtin_ve_vl_vst2d_vssl
+#define _vel_vst2d_vssml __builtin_ve_vl_vst2d_vssml
+#define _vel_vst2dnc_vssl __builtin_ve_vl_vst2dnc_vssl
+#define _vel_vst2dnc_vssml __builtin_ve_vl_vst2dnc_vssml
+#define _vel_vst2dot_vssl __builtin_ve_vl_vst2dot_vssl
+#define _vel_vst2dot_vssml __builtin_ve_vl_vst2dot_vssml
+#define _vel_vst2dncot_vssl __builtin_ve_vl_vst2dncot_vssl
+#define _vel_vst2dncot_vssml __builtin_ve_vl_vst2dncot_vssml
+#define _vel_vstu2d_vssl __builtin_ve_vl_vstu2d_vssl
+#define _vel_vstu2d_vssml __builtin_ve_vl_vstu2d_vssml
+#define _vel_vstu2dnc_vssl __builtin_ve_vl_vstu2dnc_vssl
+#define _vel_vstu2dnc_vssml __builtin_ve_vl_vstu2dnc_vssml
+#define _vel_vstu2dot_vssl __builtin_ve_vl_vstu2dot_vssl
+#define _vel_vstu2dot_vssml __builtin_ve_vl_vstu2dot_vssml
+#define _vel_vstu2dncot_vssl __builtin_ve_vl_vstu2dncot_vssl
+#define _vel_vstu2dncot_vssml __builtin_ve_vl_vstu2dncot_vssml
+#define _vel_vstl2d_vssl __builtin_ve_vl_vstl2d_vssl
+#define _vel_vstl2d_vssml __builtin_ve_vl_vstl2d_vssml
+#define _vel_vstl2dnc_vssl __builtin_ve_vl_vstl2dnc_vssl
+#define _vel_vstl2dnc_vssml __builtin_ve_vl_vstl2dnc_vssml
+#define _vel_vstl2dot_vssl __builtin_ve_vl_vstl2dot_vssl
+#define _vel_vstl2dot_vssml __builtin_ve_vl_vstl2dot_vssml
+#define _vel_vstl2dncot_vssl __builtin_ve_vl_vstl2dncot_vssl
+#define _vel_vstl2dncot_vssml __builtin_ve_vl_vstl2dncot_vssml
+#define _vel_pfchv_ssl __builtin_ve_vl_pfchv_ssl
+#define _vel_pfchvnc_ssl __builtin_ve_vl_pfchvnc_ssl
+#define _vel_lsv_vvss __builtin_ve_vl_lsv_vvss
+#define _vel_lvsl_svs __builtin_ve_vl_lvsl_svs
+#define _vel_lvsd_svs __builtin_ve_vl_lvsd_svs
+#define _vel_lvss_svs __builtin_ve_vl_lvss_svs
+#define _vel_lvm_mmss __builtin_ve_vl_lvm_mmss
+#define _vel_lvm_MMss __builtin_ve_vl_lvm_MMss
+#define _vel_svm_sms __builtin_ve_vl_svm_sms
+#define _vel_svm_sMs __builtin_ve_vl_svm_sMs
+#define _vel_vbrdd_vsl __builtin_ve_vl_vbrdd_vsl
+#define _vel_vbrdd_vsvl __builtin_ve_vl_vbrdd_vsvl
+#define _vel_vbrdd_vsmvl __builtin_ve_vl_vbrdd_vsmvl
+#define _vel_vbrdl_vsl __builtin_ve_vl_vbrdl_vsl
+#define _vel_vbrdl_vsvl __builtin_ve_vl_vbrdl_vsvl
+#define _vel_vbrdl_vsmvl __builtin_ve_vl_vbrdl_vsmvl
+#define _vel_vbrds_vsl __builtin_ve_vl_vbrds_vsl
+#define _vel_vbrds_vsvl __builtin_ve_vl_vbrds_vsvl
+#define _vel_vbrds_vsmvl __builtin_ve_vl_vbrds_vsmvl
+#define _vel_vbrdw_vsl __builtin_ve_vl_vbrdw_vsl
+#define _vel_vbrdw_vsvl __builtin_ve_vl_vbrdw_vsvl
+#define _vel_vbrdw_vsmvl __builtin_ve_vl_vbrdw_vsmvl
+#define _vel_pvbrd_vsl __builtin_ve_vl_pvbrd_vsl
+#define _vel_pvbrd_vsvl __builtin_ve_vl_pvbrd_vsvl
+#define _vel_pvbrd_vsMvl __builtin_ve_vl_pvbrd_vsMvl
+#define _vel_vmv_vsvl __builtin_ve_vl_vmv_vsvl
+#define _vel_vmv_vsvvl __builtin_ve_vl_vmv_vsvvl
+#define _vel_vmv_vsvmvl __builtin_ve_vl_vmv_vsvmvl
+#define _vel_vaddul_vvvl __builtin_ve_vl_vaddul_vvvl
+#define _vel_vaddul_vvvvl __builtin_ve_vl_vaddul_vvvvl
+#define _vel_vaddul_vsvl __builtin_ve_vl_vaddul_vsvl
+#define _vel_vaddul_vsvvl __builtin_ve_vl_vaddul_vsvvl
+#define _vel_vaddul_vvvmvl __builtin_ve_vl_vaddul_vvvmvl
+#define _vel_vaddul_vsvmvl __builtin_ve_vl_vaddul_vsvmvl
+#define _vel_vadduw_vvvl __builtin_ve_vl_vadduw_vvvl
+#define _vel_vadduw_vvvvl __builtin_ve_vl_vadduw_vvvvl
+#define _vel_vadduw_vsvl __builtin_ve_vl_vadduw_vsvl
+#define _vel_vadduw_vsvvl __builtin_ve_vl_vadduw_vsvvl
+#define _vel_vadduw_vvvmvl __builtin_ve_vl_vadduw_vvvmvl
+#define _vel_vadduw_vsvmvl __builtin_ve_vl_vadduw_vsvmvl
+#define _vel_pvaddu_vvvl __builtin_ve_vl_pvaddu_vvvl
+#define _vel_pvaddu_vvvvl __builtin_ve_vl_pvaddu_vvvvl
+#define _vel_pvaddu_vsvl __builtin_ve_vl_pvaddu_vsvl
+#define _vel_pvaddu_vsvvl __builtin_ve_vl_pvaddu_vsvvl
+#define _vel_pvaddu_vvvMvl __builtin_ve_vl_pvaddu_vvvMvl
+#define _vel_pvaddu_vsvMvl __builtin_ve_vl_pvaddu_vsvMvl
+#define _vel_vaddswsx_vvvl __builtin_ve_vl_vaddswsx_vvvl
+#define _vel_vaddswsx_vvvvl __builtin_ve_vl_vaddswsx_vvvvl
+#define _vel_vaddswsx_vsvl __builtin_ve_vl_vaddswsx_vsvl
+#define _vel_vaddswsx_vsvvl __builtin_ve_vl_vaddswsx_vsvvl
+#define _vel_vaddswsx_vvvmvl __builtin_ve_vl_vaddswsx_vvvmvl
+#define _vel_vaddswsx_vsvmvl __builtin_ve_vl_vaddswsx_vsvmvl
+#define _vel_vaddswzx_vvvl __builtin_ve_vl_vaddswzx_vvvl
+#define _vel_vaddswzx_vvvvl __builtin_ve_vl_vaddswzx_vvvvl
+#define _vel_vaddswzx_vsvl __builtin_ve_vl_vaddswzx_vsvl
+#define _vel_vaddswzx_vsvvl __builtin_ve_vl_vaddswzx_vsvvl
+#define _vel_vaddswzx_vvvmvl __builtin_ve_vl_vaddswzx_vvvmvl
+#define _vel_vaddswzx_vsvmvl __builtin_ve_vl_vaddswzx_vsvmvl
+#define _vel_pvadds_vvvl __builtin_ve_vl_pvadds_vvvl
+#define _vel_pvadds_vvvvl __builtin_ve_vl_pvadds_vvvvl
+#define _vel_pvadds_vsvl __builtin_ve_vl_pvadds_vsvl
+#define _vel_pvadds_vsvvl __builtin_ve_vl_pvadds_vsvvl
+#define _vel_pvadds_vvvMvl __builtin_ve_vl_pvadds_vvvMvl
+#define _vel_pvadds_vsvMvl __builtin_ve_vl_pvadds_vsvMvl
+#define _vel_vaddsl_vvvl __builtin_ve_vl_vaddsl_vvvl
+#define _vel_vaddsl_vvvvl __builtin_ve_vl_vaddsl_vvvvl
+#define _vel_vaddsl_vsvl __builtin_ve_vl_vaddsl_vsvl
+#define _vel_vaddsl_vsvvl __builtin_ve_vl_vaddsl_vsvvl
+#define _vel_vaddsl_vvvmvl __builtin_ve_vl_vaddsl_vvvmvl
+#define _vel_vaddsl_vsvmvl __builtin_ve_vl_vaddsl_vsvmvl
+#define _vel_vsubul_vvvl __builtin_ve_vl_vsubul_vvvl
+#define _vel_vsubul_vvvvl __builtin_ve_vl_vsubul_vvvvl
+#define _vel_vsubul_vsvl __builtin_ve_vl_vsubul_vsvl
+#define _vel_vsubul_vsvvl __builtin_ve_vl_vsubul_vsvvl
+#define _vel_vsubul_vvvmvl __builtin_ve_vl_vsubul_vvvmvl
+#define _vel_vsubul_vsvmvl __builtin_ve_vl_vsubul_vsvmvl
+#define _vel_vsubuw_vvvl __builtin_ve_vl_vsubuw_vvvl
+#define _vel_vsubuw_vvvvl __builtin_ve_vl_vsubuw_vvvvl
+#define _vel_vsubuw_vsvl __builtin_ve_vl_vsubuw_vsvl
+#define _vel_vsubuw_vsvvl __builtin_ve_vl_vsubuw_vsvvl
+#define _vel_vsubuw_vvvmvl __builtin_ve_vl_vsubuw_vvvmvl
+#define _vel_vsubuw_vsvmvl __builtin_ve_vl_vsubuw_vsvmvl
+#define _vel_pvsubu_vvvl __builtin_ve_vl_pvsubu_vvvl
+#define _vel_pvsubu_vvvvl __builtin_ve_vl_pvsubu_vvvvl
+#define _vel_pvsubu_vsvl __builtin_ve_vl_pvsubu_vsvl
+#define _vel_pvsubu_vsvvl __builtin_ve_vl_pvsubu_vsvvl
+#define _vel_pvsubu_vvvMvl __builtin_ve_vl_pvsubu_vvvMvl
+#define _vel_pvsubu_vsvMvl __builtin_ve_vl_pvsubu_vsvMvl
+#define _vel_vsubswsx_vvvl __builtin_ve_vl_vsubswsx_vvvl
+#define _vel_vsubswsx_vvvvl __builtin_ve_vl_vsubswsx_vvvvl
+#define _vel_vsubswsx_vsvl __builtin_ve_vl_vsubswsx_vsvl
+#define _vel_vsubswsx_vsvvl __builtin_ve_vl_vsubswsx_vsvvl
+#define _vel_vsubswsx_vvvmvl __builtin_ve_vl_vsubswsx_vvvmvl
+#define _vel_vsubswsx_vsvmvl __builtin_ve_vl_vsubswsx_vsvmvl
+#define _vel_vsubswzx_vvvl __builtin_ve_vl_vsubswzx_vvvl
+#define _vel_vsubswzx_vvvvl __builtin_ve_vl_vsubswzx_vvvvl
+#define _vel_vsubswzx_vsvl __builtin_ve_vl_vsubswzx_vsvl
+#define _vel_vsubswzx_vsvvl __builtin_ve_vl_vsubswzx_vsvvl
+#define _vel_vsubswzx_vvvmvl __builtin_ve_vl_vsubswzx_vvvmvl
+#define _vel_vsubswzx_vsvmvl __builtin_ve_vl_vsubswzx_vsvmvl
+#define _vel_pvsubs_vvvl __builtin_ve_vl_pvsubs_vvvl
+#define _vel_pvsubs_vvvvl __builtin_ve_vl_pvsubs_vvvvl
+#define _vel_pvsubs_vsvl __builtin_ve_vl_pvsubs_vsvl
+#define _vel_pvsubs_vsvvl __builtin_ve_vl_pvsubs_vsvvl
+#define _vel_pvsubs_vvvMvl __builtin_ve_vl_pvsubs_vvvMvl
+#define _vel_pvsubs_vsvMvl __builtin_ve_vl_pvsubs_vsvMvl
+#define _vel_vsubsl_vvvl __builtin_ve_vl_vsubsl_vvvl
+#define _vel_vsubsl_vvvvl __builtin_ve_vl_vsubsl_vvvvl
+#define _vel_vsubsl_vsvl __builtin_ve_vl_vsubsl_vsvl
+#define _vel_vsubsl_vsvvl __builtin_ve_vl_vsubsl_vsvvl
+#define _vel_vsubsl_vvvmvl __builtin_ve_vl_vsubsl_vvvmvl
+#define _vel_vsubsl_vsvmvl __builtin_ve_vl_vsubsl_vsvmvl
+#define _vel_vmulul_vvvl __builtin_ve_vl_vmulul_vvvl
+#define _vel_vmulul_vvvvl __builtin_ve_vl_vmulul_vvvvl
+#define _vel_vmulul_vsvl __builtin_ve_vl_vmulul_vsvl
+#define _vel_vmulul_vsvvl __builtin_ve_vl_vmulul_vsvvl
+#define _vel_vmulul_vvvmvl __builtin_ve_vl_vmulul_vvvmvl
+#define _vel_vmulul_vsvmvl __builtin_ve_vl_vmulul_vsvmvl
+#define _vel_vmuluw_vvvl __builtin_ve_vl_vmuluw_vvvl
+#define _vel_vmuluw_vvvvl __builtin_ve_vl_vmuluw_vvvvl
+#define _vel_vmuluw_vsvl __builtin_ve_vl_vmuluw_vsvl
+#define _vel_vmuluw_vsvvl __builtin_ve_vl_vmuluw_vsvvl
+#define _vel_vmuluw_vvvmvl __builtin_ve_vl_vmuluw_vvvmvl
+#define _vel_vmuluw_vsvmvl __builtin_ve_vl_vmuluw_vsvmvl
+#define _vel_vmulswsx_vvvl __builtin_ve_vl_vmulswsx_vvvl
+#define _vel_vmulswsx_vvvvl __builtin_ve_vl_vmulswsx_vvvvl
+#define _vel_vmulswsx_vsvl __builtin_ve_vl_vmulswsx_vsvl
+#define _vel_vmulswsx_vsvvl __builtin_ve_vl_vmulswsx_vsvvl
+#define _vel_vmulswsx_vvvmvl __builtin_ve_vl_vmulswsx_vvvmvl
+#define _vel_vmulswsx_vsvmvl __builtin_ve_vl_vmulswsx_vsvmvl
+#define _vel_vmulswzx_vvvl __builtin_ve_vl_vmulswzx_vvvl
+#define _vel_vmulswzx_vvvvl __builtin_ve_vl_vmulswzx_vvvvl
+#define _vel_vmulswzx_vsvl __builtin_ve_vl_vmulswzx_vsvl
+#define _vel_vmulswzx_vsvvl __builtin_ve_vl_vmulswzx_vsvvl
+#define _vel_vmulswzx_vvvmvl __builtin_ve_vl_vmulswzx_vvvmvl
+#define _vel_vmulswzx_vsvmvl __builtin_ve_vl_vmulswzx_vsvmvl
+#define _vel_vmulsl_vvvl __builtin_ve_vl_vmulsl_vvvl
+#define _vel_vmulsl_vvvvl __builtin_ve_vl_vmulsl_vvvvl
+#define _vel_vmulsl_vsvl __builtin_ve_vl_vmulsl_vsvl
+#define _vel_vmulsl_vsvvl __builtin_ve_vl_vmulsl_vsvvl
+#define _vel_vmulsl_vvvmvl __builtin_ve_vl_vmulsl_vvvmvl
+#define _vel_vmulsl_vsvmvl __builtin_ve_vl_vmulsl_vsvmvl
+#define _vel_vmulslw_vvvl __builtin_ve_vl_vmulslw_vvvl
+#define _vel_vmulslw_vvvvl __builtin_ve_vl_vmulslw_vvvvl
+#define _vel_vmulslw_vsvl __builtin_ve_vl_vmulslw_vsvl
+#define _vel_vmulslw_vsvvl __builtin_ve_vl_vmulslw_vsvvl
+#define _vel_vdivul_vvvl __builtin_ve_vl_vdivul_vvvl
+#define _vel_vdivul_vvvvl __builtin_ve_vl_vdivul_vvvvl
+#define _vel_vdivul_vsvl __builtin_ve_vl_vdivul_vsvl
+#define _vel_vdivul_vsvvl __builtin_ve_vl_vdivul_vsvvl
+#define _vel_vdivul_vvvmvl __builtin_ve_vl_vdivul_vvvmvl
+#define _vel_vdivul_vsvmvl __builtin_ve_vl_vdivul_vsvmvl
+#define _vel_vdivuw_vvvl __builtin_ve_vl_vdivuw_vvvl
+#define _vel_vdivuw_vvvvl __builtin_ve_vl_vdivuw_vvvvl
+#define _vel_vdivuw_vsvl __builtin_ve_vl_vdivuw_vsvl
+#define _vel_vdivuw_vsvvl __builtin_ve_vl_vdivuw_vsvvl
+#define _vel_vdivuw_vvvmvl __builtin_ve_vl_vdivuw_vvvmvl
+#define _vel_vdivuw_vsvmvl __builtin_ve_vl_vdivuw_vsvmvl
+#define _vel_vdivul_vvsl __builtin_ve_vl_vdivul_vvsl
+#define _vel_vdivul_vvsvl __builtin_ve_vl_vdivul_vvsvl
+#define _vel_vdivul_vvsmvl __builtin_ve_vl_vdivul_vvsmvl
+#define _vel_vdivuw_vvsl __builtin_ve_vl_vdivuw_vvsl
+#define _vel_vdivuw_vvsvl __builtin_ve_vl_vdivuw_vvsvl
+#define _vel_vdivuw_vvsmvl __builtin_ve_vl_vdivuw_vvsmvl
+#define _vel_vdivswsx_vvvl __builtin_ve_vl_vdivswsx_vvvl
+#define _vel_vdivswsx_vvvvl __builtin_ve_vl_vdivswsx_vvvvl
+#define _vel_vdivswsx_vsvl __builtin_ve_vl_vdivswsx_vsvl
+#define _vel_vdivswsx_vsvvl __builtin_ve_vl_vdivswsx_vsvvl
+#define _vel_vdivswsx_vvvmvl __builtin_ve_vl_vdivswsx_vvvmvl
+#define _vel_vdivswsx_vsvmvl __builtin_ve_vl_vdivswsx_vsvmvl
+#define _vel_vdivswzx_vvvl __builtin_ve_vl_vdivswzx_vvvl
+#define _vel_vdivswzx_vvvvl __builtin_ve_vl_vdivswzx_vvvvl
+#define _vel_vdivswzx_vsvl __builtin_ve_vl_vdivswzx_vsvl
+#define _vel_vdivswzx_vsvvl __builtin_ve_vl_vdivswzx_vsvvl
+#define _vel_vdivswzx_vvvmvl __builtin_ve_vl_vdivswzx_vvvmvl
+#define _vel_vdivswzx_vsvmvl __builtin_ve_vl_vdivswzx_vsvmvl
+#define _vel_vdivswsx_vvsl __builtin_ve_vl_vdivswsx_vvsl
+#define _vel_vdivswsx_vvsvl __builtin_ve_vl_vdivswsx_vvsvl
+#define _vel_vdivswsx_vvsmvl __builtin_ve_vl_vdivswsx_vvsmvl
+#define _vel_vdivswzx_vvsl __builtin_ve_vl_vdivswzx_vvsl
+#define _vel_vdivswzx_vvsvl __builtin_ve_vl_vdivswzx_vvsvl
+#define _vel_vdivswzx_vvsmvl __builtin_ve_vl_vdivswzx_vvsmvl
+#define _vel_vdivsl_vvvl __builtin_ve_vl_vdivsl_vvvl
+#define _vel_vdivsl_vvvvl __builtin_ve_vl_vdivsl_vvvvl
+#define _vel_vdivsl_vsvl __builtin_ve_vl_vdivsl_vsvl
+#define _vel_vdivsl_vsvvl __builtin_ve_vl_vdivsl_vsvvl
+#define _vel_vdivsl_vvvmvl __builtin_ve_vl_vdivsl_vvvmvl
+#define _vel_vdivsl_vsvmvl __builtin_ve_vl_vdivsl_vsvmvl
+#define _vel_vdivsl_vvsl __builtin_ve_vl_vdivsl_vvsl
+#define _vel_vdivsl_vvsvl __builtin_ve_vl_vdivsl_vvsvl
+#define _vel_vdivsl_vvsmvl __builtin_ve_vl_vdivsl_vvsmvl
+#define _vel_vcmpul_vvvl __builtin_ve_vl_vcmpul_vvvl
+#define _vel_vcmpul_vvvvl __builtin_ve_vl_vcmpul_vvvvl
+#define _vel_vcmpul_vsvl __builtin_ve_vl_vcmpul_vsvl
+#define _vel_vcmpul_vsvvl __builtin_ve_vl_vcmpul_vsvvl
+#define _vel_vcmpul_vvvmvl __builtin_ve_vl_vcmpul_vvvmvl
+#define _vel_vcmpul_vsvmvl __builtin_ve_vl_vcmpul_vsvmvl
+#define _vel_vcmpuw_vvvl __builtin_ve_vl_vcmpuw_vvvl
+#define _vel_vcmpuw_vvvvl __builtin_ve_vl_vcmpuw_vvvvl
+#define _vel_vcmpuw_vsvl __builtin_ve_vl_vcmpuw_vsvl
+#define _vel_vcmpuw_vsvvl __builtin_ve_vl_vcmpuw_vsvvl
+#define _vel_vcmpuw_vvvmvl __builtin_ve_vl_vcmpuw_vvvmvl
+#define _vel_vcmpuw_vsvmvl __builtin_ve_vl_vcmpuw_vsvmvl
+#define _vel_pvcmpu_vvvl __builtin_ve_vl_pvcmpu_vvvl
+#define _vel_pvcmpu_vvvvl __builtin_ve_vl_pvcmpu_vvvvl
+#define _vel_pvcmpu_vsvl __builtin_ve_vl_pvcmpu_vsvl
+#define _vel_pvcmpu_vsvvl __builtin_ve_vl_pvcmpu_vsvvl
+#define _vel_pvcmpu_vvvMvl __builtin_ve_vl_pvcmpu_vvvMvl
+#define _vel_pvcmpu_vsvMvl __builtin_ve_vl_pvcmpu_vsvMvl
+#define _vel_vcmpswsx_vvvl __builtin_ve_vl_vcmpswsx_vvvl
+#define _vel_vcmpswsx_vvvvl __builtin_ve_vl_vcmpswsx_vvvvl
+#define _vel_vcmpswsx_vsvl __builtin_ve_vl_vcmpswsx_vsvl
+#define _vel_vcmpswsx_vsvvl __builtin_ve_vl_vcmpswsx_vsvvl
+#define _vel_vcmpswsx_vvvmvl __builtin_ve_vl_vcmpswsx_vvvmvl
+#define _vel_vcmpswsx_vsvmvl __builtin_ve_vl_vcmpswsx_vsvmvl
+#define _vel_vcmpswzx_vvvl __builtin_ve_vl_vcmpswzx_vvvl
+#define _vel_vcmpswzx_vvvvl __builtin_ve_vl_vcmpswzx_vvvvl
+#define _vel_vcmpswzx_vsvl __builtin_ve_vl_vcmpswzx_vsvl
+#define _vel_vcmpswzx_vsvvl __builtin_ve_vl_vcmpswzx_vsvvl
+#define _vel_vcmpswzx_vvvmvl __builtin_ve_vl_vcmpswzx_vvvmvl
+#define _vel_vcmpswzx_vsvmvl __builtin_ve_vl_vcmpswzx_vsvmvl
+#define _vel_pvcmps_vvvl __builtin_ve_vl_pvcmps_vvvl
+#define _vel_pvcmps_vvvvl __builtin_ve_vl_pvcmps_vvvvl
+#define _vel_pvcmps_vsvl __builtin_ve_vl_pvcmps_vsvl
+#define _vel_pvcmps_vsvvl __builtin_ve_vl_pvcmps_vsvvl
+#define _vel_pvcmps_vvvMvl __builtin_ve_vl_pvcmps_vvvMvl
+#define _vel_pvcmps_vsvMvl __builtin_ve_vl_pvcmps_vsvMvl
+#define _vel_vcmpsl_vvvl __builtin_ve_vl_vcmpsl_vvvl
+#define _vel_vcmpsl_vvvvl __builtin_ve_vl_vcmpsl_vvvvl
+#define _vel_vcmpsl_vsvl __builtin_ve_vl_vcmpsl_vsvl
+#define _vel_vcmpsl_vsvvl __builtin_ve_vl_vcmpsl_vsvvl
+#define _vel_vcmpsl_vvvmvl __builtin_ve_vl_vcmpsl_vvvmvl
+#define _vel_vcmpsl_vsvmvl __builtin_ve_vl_vcmpsl_vsvmvl
+#define _vel_vmaxswsx_vvvl __builtin_ve_vl_vmaxswsx_vvvl
+#define _vel_vmaxswsx_vvvvl __builtin_ve_vl_vmaxswsx_vvvvl
+#define _vel_vmaxswsx_vsvl __builtin_ve_vl_vmaxswsx_vsvl
+#define _vel_vmaxswsx_vsvvl __builtin_ve_vl_vmaxswsx_vsvvl
+#define _vel_vmaxswsx_vvvmvl __builtin_ve_vl_vmaxswsx_vvvmvl
+#define _vel_vmaxswsx_vsvmvl __builtin_ve_vl_vmaxswsx_vsvmvl
+#define _vel_vmaxswzx_vvvl __builtin_ve_vl_vmaxswzx_vvvl
+#define _vel_vmaxswzx_vvvvl __builtin_ve_vl_vmaxswzx_vvvvl
+#define _vel_vmaxswzx_vsvl __builtin_ve_vl_vmaxswzx_vsvl
+#define _vel_vmaxswzx_vsvvl __builtin_ve_vl_vmaxswzx_vsvvl
+#define _vel_vmaxswzx_vvvmvl __builtin_ve_vl_vmaxswzx_vvvmvl
+#define _vel_vmaxswzx_vsvmvl __builtin_ve_vl_vmaxswzx_vsvmvl
+#define _vel_pvmaxs_vvvl __builtin_ve_vl_pvmaxs_vvvl
+#define _vel_pvmaxs_vvvvl __builtin_ve_vl_pvmaxs_vvvvl
+#define _vel_pvmaxs_vsvl __builtin_ve_vl_pvmaxs_vsvl
+#define _vel_pvmaxs_vsvvl __builtin_ve_vl_pvmaxs_vsvvl
+#define _vel_pvmaxs_vvvMvl __builtin_ve_vl_pvmaxs_vvvMvl
+#define _vel_pvmaxs_vsvMvl __builtin_ve_vl_pvmaxs_vsvMvl
+#define _vel_vminswsx_vvvl __builtin_ve_vl_vminswsx_vvvl
+#define _vel_vminswsx_vvvvl __builtin_ve_vl_vminswsx_vvvvl
+#define _vel_vminswsx_vsvl __builtin_ve_vl_vminswsx_vsvl
+#define _vel_vminswsx_vsvvl __builtin_ve_vl_vminswsx_vsvvl
+#define _vel_vminswsx_vvvmvl __builtin_ve_vl_vminswsx_vvvmvl
+#define _vel_vminswsx_vsvmvl __builtin_ve_vl_vminswsx_vsvmvl
+#define _vel_vminswzx_vvvl __builtin_ve_vl_vminswzx_vvvl
+#define _vel_vminswzx_vvvvl __builtin_ve_vl_vminswzx_vvvvl
+#define _vel_vminswzx_vsvl __builtin_ve_vl_vminswzx_vsvl
+#define _vel_vminswzx_vsvvl __builtin_ve_vl_vminswzx_vsvvl
+#define _vel_vminswzx_vvvmvl __builtin_ve_vl_vminswzx_vvvmvl
+#define _vel_vminswzx_vsvmvl __builtin_ve_vl_vminswzx_vsvmvl
+#define _vel_pvmins_vvvl __builtin_ve_vl_pvmins_vvvl
+#define _vel_pvmins_vvvvl __builtin_ve_vl_pvmins_vvvvl
+#define _vel_pvmins_vsvl __builtin_ve_vl_pvmins_vsvl
+#define _vel_pvmins_vsvvl __builtin_ve_vl_pvmins_vsvvl
+#define _vel_pvmins_vvvMvl __builtin_ve_vl_pvmins_vvvMvl
+#define _vel_pvmins_vsvMvl __builtin_ve_vl_pvmins_vsvMvl
+#define _vel_vmaxsl_vvvl __builtin_ve_vl_vmaxsl_vvvl
+#define _vel_vmaxsl_vvvvl __builtin_ve_vl_vmaxsl_vvvvl
+#define _vel_vmaxsl_vsvl __builtin_ve_vl_vmaxsl_vsvl
+#define _vel_vmaxsl_vsvvl __builtin_ve_vl_vmaxsl_vsvvl
+#define _vel_vmaxsl_vvvmvl __builtin_ve_vl_vmaxsl_vvvmvl
+#define _vel_vmaxsl_vsvmvl __builtin_ve_vl_vmaxsl_vsvmvl
+#define _vel_vminsl_vvvl __builtin_ve_vl_vminsl_vvvl
+#define _vel_vminsl_vvvvl __builtin_ve_vl_vminsl_vvvvl
+#define _vel_vminsl_vsvl __builtin_ve_vl_vminsl_vsvl
+#define _vel_vminsl_vsvvl __builtin_ve_vl_vminsl_vsvvl
+#define _vel_vminsl_vvvmvl __builtin_ve_vl_vminsl_vvvmvl
+#define _vel_vminsl_vsvmvl __builtin_ve_vl_vminsl_vsvmvl
+#define _vel_vand_vvvl __builtin_ve_vl_vand_vvvl
+#define _vel_vand_vvvvl __builtin_ve_vl_vand_vvvvl
+#define _vel_vand_vsvl __builtin_ve_vl_vand_vsvl
+#define _vel_vand_vsvvl __builtin_ve_vl_vand_vsvvl
+#define _vel_vand_vvvmvl __builtin_ve_vl_vand_vvvmvl
+#define _vel_vand_vsvmvl __builtin_ve_vl_vand_vsvmvl
+#define _vel_pvand_vvvl __builtin_ve_vl_pvand_vvvl
+#define _vel_pvand_vvvvl __builtin_ve_vl_pvand_vvvvl
+#define _vel_pvand_vsvl __builtin_ve_vl_pvand_vsvl
+#define _vel_pvand_vsvvl __builtin_ve_vl_pvand_vsvvl
+#define _vel_pvand_vvvMvl __builtin_ve_vl_pvand_vvvMvl
+#define _vel_pvand_vsvMvl __builtin_ve_vl_pvand_vsvMvl
+#define _vel_vor_vvvl __builtin_ve_vl_vor_vvvl
+#define _vel_vor_vvvvl __builtin_ve_vl_vor_vvvvl
+#define _vel_vor_vsvl __builtin_ve_vl_vor_vsvl
+#define _vel_vor_vsvvl __builtin_ve_vl_vor_vsvvl
+#define _vel_vor_vvvmvl __builtin_ve_vl_vor_vvvmvl
+#define _vel_vor_vsvmvl __builtin_ve_vl_vor_vsvmvl
+#define _vel_pvor_vvvl __builtin_ve_vl_pvor_vvvl
+#define _vel_pvor_vvvvl __builtin_ve_vl_pvor_vvvvl
+#define _vel_pvor_vsvl __builtin_ve_vl_pvor_vsvl
+#define _vel_pvor_vsvvl __builtin_ve_vl_pvor_vsvvl
+#define _vel_pvor_vvvMvl __builtin_ve_vl_pvor_vvvMvl
+#define _vel_pvor_vsvMvl __builtin_ve_vl_pvor_vsvMvl
+#define _vel_vxor_vvvl __builtin_ve_vl_vxor_vvvl
+#define _vel_vxor_vvvvl __builtin_ve_vl_vxor_vvvvl
+#define _vel_vxor_vsvl __builtin_ve_vl_vxor_vsvl
+#define _vel_vxor_vsvvl __builtin_ve_vl_vxor_vsvvl
+#define _vel_vxor_vvvmvl __builtin_ve_vl_vxor_vvvmvl
+#define _vel_vxor_vsvmvl __builtin_ve_vl_vxor_vsvmvl
+#define _vel_pvxor_vvvl __builtin_ve_vl_pvxor_vvvl
+#define _vel_pvxor_vvvvl __builtin_ve_vl_pvxor_vvvvl
+#define _vel_pvxor_vsvl __builtin_ve_vl_pvxor_vsvl
+#define _vel_pvxor_vsvvl __builtin_ve_vl_pvxor_vsvvl
+#define _vel_pvxor_vvvMvl __builtin_ve_vl_pvxor_vvvMvl
+#define _vel_pvxor_vsvMvl __builtin_ve_vl_pvxor_vsvMvl
+#define _vel_veqv_vvvl __builtin_ve_vl_veqv_vvvl
+#define _vel_veqv_vvvvl __builtin_ve_vl_veqv_vvvvl
+#define _vel_veqv_vsvl __builtin_ve_vl_veqv_vsvl
+#define _vel_veqv_vsvvl __builtin_ve_vl_veqv_vsvvl
+#define _vel_veqv_vvvmvl __builtin_ve_vl_veqv_vvvmvl
+#define _vel_veqv_vsvmvl __builtin_ve_vl_veqv_vsvmvl
+#define _vel_pveqv_vvvl __builtin_ve_vl_pveqv_vvvl
+#define _vel_pveqv_vvvvl __builtin_ve_vl_pveqv_vvvvl
+#define _vel_pveqv_vsvl __builtin_ve_vl_pveqv_vsvl
+#define _vel_pveqv_vsvvl __builtin_ve_vl_pveqv_vsvvl
+#define _vel_pveqv_vvvMvl __builtin_ve_vl_pveqv_vvvMvl
+#define _vel_pveqv_vsvMvl __builtin_ve_vl_pveqv_vsvMvl
+#define _vel_vldz_vvl __builtin_ve_vl_vldz_vvl
+#define _vel_vldz_vvvl __builtin_ve_vl_vldz_vvvl
+#define _vel_vldz_vvmvl __builtin_ve_vl_vldz_vvmvl
+#define _vel_pvldzlo_vvl __builtin_ve_vl_pvldzlo_vvl
+#define _vel_pvldzlo_vvvl __builtin_ve_vl_pvldzlo_vvvl
+#define _vel_pvldzlo_vvmvl __builtin_ve_vl_pvldzlo_vvmvl
+#define _vel_pvldzup_vvl __builtin_ve_vl_pvldzup_vvl
+#define _vel_pvldzup_vvvl __builtin_ve_vl_pvldzup_vvvl
+#define _vel_pvldzup_vvmvl __builtin_ve_vl_pvldzup_vvmvl
+#define _vel_pvldz_vvl __builtin_ve_vl_pvldz_vvl
+#define _vel_pvldz_vvvl __builtin_ve_vl_pvldz_vvvl
+#define _vel_pvldz_vvMvl __builtin_ve_vl_pvldz_vvMvl
+#define _vel_vpcnt_vvl __builtin_ve_vl_vpcnt_vvl
+#define _vel_vpcnt_vvvl __builtin_ve_vl_vpcnt_vvvl
+#define _vel_vpcnt_vvmvl __builtin_ve_vl_vpcnt_vvmvl
+#define _vel_pvpcntlo_vvl __builtin_ve_vl_pvpcntlo_vvl
+#define _vel_pvpcntlo_vvvl __builtin_ve_vl_pvpcntlo_vvvl
+#define _vel_pvpcntlo_vvmvl __builtin_ve_vl_pvpcntlo_vvmvl
+#define _vel_pvpcntup_vvl __builtin_ve_vl_pvpcntup_vvl
+#define _vel_pvpcntup_vvvl __builtin_ve_vl_pvpcntup_vvvl
+#define _vel_pvpcntup_vvmvl __builtin_ve_vl_pvpcntup_vvmvl
+#define _vel_pvpcnt_vvl __builtin_ve_vl_pvpcnt_vvl
+#define _vel_pvpcnt_vvvl __builtin_ve_vl_pvpcnt_vvvl
+#define _vel_pvpcnt_vvMvl __builtin_ve_vl_pvpcnt_vvMvl
+#define _vel_vbrv_vvl __builtin_ve_vl_vbrv_vvl
+#define _vel_vbrv_vvvl __builtin_ve_vl_vbrv_vvvl
+#define _vel_vbrv_vvmvl __builtin_ve_vl_vbrv_vvmvl
+#define _vel_pvbrvlo_vvl __builtin_ve_vl_pvbrvlo_vvl
+#define _vel_pvbrvlo_vvvl __builtin_ve_vl_pvbrvlo_vvvl
+#define _vel_pvbrvlo_vvmvl __builtin_ve_vl_pvbrvlo_vvmvl
+#define _vel_pvbrvup_vvl __builtin_ve_vl_pvbrvup_vvl
+#define _vel_pvbrvup_vvvl __builtin_ve_vl_pvbrvup_vvvl
+#define _vel_pvbrvup_vvmvl __builtin_ve_vl_pvbrvup_vvmvl
+#define _vel_pvbrv_vvl __builtin_ve_vl_pvbrv_vvl
+#define _vel_pvbrv_vvvl __builtin_ve_vl_pvbrv_vvvl
+#define _vel_pvbrv_vvMvl __builtin_ve_vl_pvbrv_vvMvl
+#define _vel_vseq_vl __builtin_ve_vl_vseq_vl
+#define _vel_vseq_vvl __builtin_ve_vl_vseq_vvl
+#define _vel_pvseqlo_vl __builtin_ve_vl_pvseqlo_vl
+#define _vel_pvseqlo_vvl __builtin_ve_vl_pvseqlo_vvl
+#define _vel_pvsequp_vl __builtin_ve_vl_pvsequp_vl
+#define _vel_pvsequp_vvl __builtin_ve_vl_pvsequp_vvl
+#define _vel_pvseq_vl __builtin_ve_vl_pvseq_vl
+#define _vel_pvseq_vvl __builtin_ve_vl_pvseq_vvl
+#define _vel_vsll_vvvl __builtin_ve_vl_vsll_vvvl
+#define _vel_vsll_vvvvl __builtin_ve_vl_vsll_vvvvl
+#define _vel_vsll_vvsl __builtin_ve_vl_vsll_vvsl
+#define _vel_vsll_vvsvl __builtin_ve_vl_vsll_vvsvl
+#define _vel_vsll_vvvmvl __builtin_ve_vl_vsll_vvvmvl
+#define _vel_vsll_vvsmvl __builtin_ve_vl_vsll_vvsmvl
+#define _vel_pvsll_vvvl __builtin_ve_vl_pvsll_vvvl
+#define _vel_pvsll_vvvvl __builtin_ve_vl_pvsll_vvvvl
+#define _vel_pvsll_vvsl __builtin_ve_vl_pvsll_vvsl
+#define _vel_pvsll_vvsvl __builtin_ve_vl_pvsll_vvsvl
+#define _vel_pvsll_vvvMvl __builtin_ve_vl_pvsll_vvvMvl
+#define _vel_pvsll_vvsMvl __builtin_ve_vl_pvsll_vvsMvl
+#define _vel_vsrl_vvvl __builtin_ve_vl_vsrl_vvvl
+#define _vel_vsrl_vvvvl __builtin_ve_vl_vsrl_vvvvl
+#define _vel_vsrl_vvsl __builtin_ve_vl_vsrl_vvsl
+#define _vel_vsrl_vvsvl __builtin_ve_vl_vsrl_vvsvl
+#define _vel_vsrl_vvvmvl __builtin_ve_vl_vsrl_vvvmvl
+#define _vel_vsrl_vvsmvl __builtin_ve_vl_vsrl_vvsmvl
+#define _vel_pvsrl_vvvl __builtin_ve_vl_pvsrl_vvvl
+#define _vel_pvsrl_vvvvl __builtin_ve_vl_pvsrl_vvvvl
+#define _vel_pvsrl_vvsl __builtin_ve_vl_pvsrl_vvsl
+#define _vel_pvsrl_vvsvl __builtin_ve_vl_pvsrl_vvsvl
+#define _vel_pvsrl_vvvMvl __builtin_ve_vl_pvsrl_vvvMvl
+#define _vel_pvsrl_vvsMvl __builtin_ve_vl_pvsrl_vvsMvl
+#define _vel_vslawsx_vvvl __builtin_ve_vl_vslawsx_vvvl
+#define _vel_vslawsx_vvvvl __builtin_ve_vl_vslawsx_vvvvl
+#define _vel_vslawsx_vvsl __builtin_ve_vl_vslawsx_vvsl
+#define _vel_vslawsx_vvsvl __builtin_ve_vl_vslawsx_vvsvl
+#define _vel_vslawsx_vvvmvl __builtin_ve_vl_vslawsx_vvvmvl
+#define _vel_vslawsx_vvsmvl __builtin_ve_vl_vslawsx_vvsmvl
+#define _vel_vslawzx_vvvl __builtin_ve_vl_vslawzx_vvvl
+#define _vel_vslawzx_vvvvl __builtin_ve_vl_vslawzx_vvvvl
+#define _vel_vslawzx_vvsl __builtin_ve_vl_vslawzx_vvsl
+#define _vel_vslawzx_vvsvl __builtin_ve_vl_vslawzx_vvsvl
+#define _vel_vslawzx_vvvmvl __builtin_ve_vl_vslawzx_vvvmvl
+#define _vel_vslawzx_vvsmvl __builtin_ve_vl_vslawzx_vvsmvl
+#define _vel_pvsla_vvvl __builtin_ve_vl_pvsla_vvvl
+#define _vel_pvsla_vvvvl __builtin_ve_vl_pvsla_vvvvl
+#define _vel_pvsla_vvsl __builtin_ve_vl_pvsla_vvsl
+#define _vel_pvsla_vvsvl __builtin_ve_vl_pvsla_vvsvl
+#define _vel_pvsla_vvvMvl __builtin_ve_vl_pvsla_vvvMvl
+#define _vel_pvsla_vvsMvl __builtin_ve_vl_pvsla_vvsMvl
+#define _vel_vslal_vvvl __builtin_ve_vl_vslal_vvvl
+#define _vel_vslal_vvvvl __builtin_ve_vl_vslal_vvvvl
+#define _vel_vslal_vvsl __builtin_ve_vl_vslal_vvsl
+#define _vel_vslal_vvsvl __builtin_ve_vl_vslal_vvsvl
+#define _vel_vslal_vvvmvl __builtin_ve_vl_vslal_vvvmvl
+#define _vel_vslal_vvsmvl __builtin_ve_vl_vslal_vvsmvl
+#define _vel_vsrawsx_vvvl __builtin_ve_vl_vsrawsx_vvvl
+#define _vel_vsrawsx_vvvvl __builtin_ve_vl_vsrawsx_vvvvl
+#define _vel_vsrawsx_vvsl __builtin_ve_vl_vsrawsx_vvsl
+#define _vel_vsrawsx_vvsvl __builtin_ve_vl_vsrawsx_vvsvl
+#define _vel_vsrawsx_vvvmvl __builtin_ve_vl_vsrawsx_vvvmvl
+#define _vel_vsrawsx_vvsmvl __builtin_ve_vl_vsrawsx_vvsmvl
+#define _vel_vsrawzx_vvvl __builtin_ve_vl_vsrawzx_vvvl
+#define _vel_vsrawzx_vvvvl __builtin_ve_vl_vsrawzx_vvvvl
+#define _vel_vsrawzx_vvsl __builtin_ve_vl_vsrawzx_vvsl
+#define _vel_vsrawzx_vvsvl __builtin_ve_vl_vsrawzx_vvsvl
+#define _vel_vsrawzx_vvvmvl __builtin_ve_vl_vsrawzx_vvvmvl
+#define _vel_vsrawzx_vvsmvl __builtin_ve_vl_vsrawzx_vvsmvl
+#define _vel_pvsra_vvvl __builtin_ve_vl_pvsra_vvvl
+#define _vel_pvsra_vvvvl __builtin_ve_vl_pvsra_vvvvl
+#define _vel_pvsra_vvsl __builtin_ve_vl_pvsra_vvsl
+#define _vel_pvsra_vvsvl __builtin_ve_vl_pvsra_vvsvl
+#define _vel_pvsra_vvvMvl __builtin_ve_vl_pvsra_vvvMvl
+#define _vel_pvsra_vvsMvl __builtin_ve_vl_pvsra_vvsMvl
+#define _vel_vsral_vvvl __builtin_ve_vl_vsral_vvvl
+#define _vel_vsral_vvvvl __builtin_ve_vl_vsral_vvvvl
+#define _vel_vsral_vvsl __builtin_ve_vl_vsral_vvsl
+#define _vel_vsral_vvsvl __builtin_ve_vl_vsral_vvsvl
+#define _vel_vsral_vvvmvl __builtin_ve_vl_vsral_vvvmvl
+#define _vel_vsral_vvsmvl __builtin_ve_vl_vsral_vvsmvl
+#define _vel_vsfa_vvssl __builtin_ve_vl_vsfa_vvssl
+#define _vel_vsfa_vvssvl __builtin_ve_vl_vsfa_vvssvl
+#define _vel_vsfa_vvssmvl __builtin_ve_vl_vsfa_vvssmvl
+#define _vel_vfaddd_vvvl __builtin_ve_vl_vfaddd_vvvl
+#define _vel_vfaddd_vvvvl __builtin_ve_vl_vfaddd_vvvvl
+#define _vel_vfaddd_vsvl __builtin_ve_vl_vfaddd_vsvl
+#define _vel_vfaddd_vsvvl __builtin_ve_vl_vfaddd_vsvvl
+#define _vel_vfaddd_vvvmvl __builtin_ve_vl_vfaddd_vvvmvl
+#define _vel_vfaddd_vsvmvl __builtin_ve_vl_vfaddd_vsvmvl
+#define _vel_vfadds_vvvl __builtin_ve_vl_vfadds_vvvl
+#define _vel_vfadds_vvvvl __builtin_ve_vl_vfadds_vvvvl
+#define _vel_vfadds_vsvl __builtin_ve_vl_vfadds_vsvl
+#define _vel_vfadds_vsvvl __builtin_ve_vl_vfadds_vsvvl
+#define _vel_vfadds_vvvmvl __builtin_ve_vl_vfadds_vvvmvl
+#define _vel_vfadds_vsvmvl __builtin_ve_vl_vfadds_vsvmvl
+#define _vel_pvfadd_vvvl __builtin_ve_vl_pvfadd_vvvl
+#define _vel_pvfadd_vvvvl __builtin_ve_vl_pvfadd_vvvvl
+#define _vel_pvfadd_vsvl __builtin_ve_vl_pvfadd_vsvl
+#define _vel_pvfadd_vsvvl __builtin_ve_vl_pvfadd_vsvvl
+#define _vel_pvfadd_vvvMvl __builtin_ve_vl_pvfadd_vvvMvl
+#define _vel_pvfadd_vsvMvl __builtin_ve_vl_pvfadd_vsvMvl
+#define _vel_vfsubd_vvvl __builtin_ve_vl_vfsubd_vvvl
+#define _vel_vfsubd_vvvvl __builtin_ve_vl_vfsubd_vvvvl
+#define _vel_vfsubd_vsvl __builtin_ve_vl_vfsubd_vsvl
+#define _vel_vfsubd_vsvvl __builtin_ve_vl_vfsubd_vsvvl
+#define _vel_vfsubd_vvvmvl __builtin_ve_vl_vfsubd_vvvmvl
+#define _vel_vfsubd_vsvmvl __builtin_ve_vl_vfsubd_vsvmvl
+#define _vel_vfsubs_vvvl __builtin_ve_vl_vfsubs_vvvl
+#define _vel_vfsubs_vvvvl __builtin_ve_vl_vfsubs_vvvvl
+#define _vel_vfsubs_vsvl __builtin_ve_vl_vfsubs_vsvl
+#define _vel_vfsubs_vsvvl __builtin_ve_vl_vfsubs_vsvvl
+#define _vel_vfsubs_vvvmvl __builtin_ve_vl_vfsubs_vvvmvl
+#define _vel_vfsubs_vsvmvl __builtin_ve_vl_vfsubs_vsvmvl
+#define _vel_pvfsub_vvvl __builtin_ve_vl_pvfsub_vvvl
+#define _vel_pvfsub_vvvvl __builtin_ve_vl_pvfsub_vvvvl
+#define _vel_pvfsub_vsvl __builtin_ve_vl_pvfsub_vsvl
+#define _vel_pvfsub_vsvvl __builtin_ve_vl_pvfsub_vsvvl
+#define _vel_pvfsub_vvvMvl __builtin_ve_vl_pvfsub_vvvMvl
+#define _vel_pvfsub_vsvMvl __builtin_ve_vl_pvfsub_vsvMvl
+#define _vel_vfmuld_vvvl __builtin_ve_vl_vfmuld_vvvl
+#define _vel_vfmuld_vvvvl __builtin_ve_vl_vfmuld_vvvvl
+#define _vel_vfmuld_vsvl __builtin_ve_vl_vfmuld_vsvl
+#define _vel_vfmuld_vsvvl __builtin_ve_vl_vfmuld_vsvvl
+#define _vel_vfmuld_vvvmvl __builtin_ve_vl_vfmuld_vvvmvl
+#define _vel_vfmuld_vsvmvl __builtin_ve_vl_vfmuld_vsvmvl
+#define _vel_vfmuls_vvvl __builtin_ve_vl_vfmuls_vvvl
+#define _vel_vfmuls_vvvvl __builtin_ve_vl_vfmuls_vvvvl
+#define _vel_vfmuls_vsvl __builtin_ve_vl_vfmuls_vsvl
+#define _vel_vfmuls_vsvvl __builtin_ve_vl_vfmuls_vsvvl
+#define _vel_vfmuls_vvvmvl __builtin_ve_vl_vfmuls_vvvmvl
+#define _vel_vfmuls_vsvmvl __builtin_ve_vl_vfmuls_vsvmvl
+#define _vel_pvfmul_vvvl __builtin_ve_vl_pvfmul_vvvl
+#define _vel_pvfmul_vvvvl __builtin_ve_vl_pvfmul_vvvvl
+#define _vel_pvfmul_vsvl __builtin_ve_vl_pvfmul_vsvl
+#define _vel_pvfmul_vsvvl __builtin_ve_vl_pvfmul_vsvvl
+#define _vel_pvfmul_vvvMvl __builtin_ve_vl_pvfmul_vvvMvl
+#define _vel_pvfmul_vsvMvl __builtin_ve_vl_pvfmul_vsvMvl
+#define _vel_vfdivd_vvvl __builtin_ve_vl_vfdivd_vvvl
+#define _vel_vfdivd_vvvvl __builtin_ve_vl_vfdivd_vvvvl
+#define _vel_vfdivd_vsvl __builtin_ve_vl_vfdivd_vsvl
+#define _vel_vfdivd_vsvvl __builtin_ve_vl_vfdivd_vsvvl
+#define _vel_vfdivd_vvvmvl __builtin_ve_vl_vfdivd_vvvmvl
+#define _vel_vfdivd_vsvmvl __builtin_ve_vl_vfdivd_vsvmvl
+#define _vel_vfdivs_vvvl __builtin_ve_vl_vfdivs_vvvl
+#define _vel_vfdivs_vvvvl __builtin_ve_vl_vfdivs_vvvvl
+#define _vel_vfdivs_vsvl __builtin_ve_vl_vfdivs_vsvl
+#define _vel_vfdivs_vsvvl __builtin_ve_vl_vfdivs_vsvvl
+#define _vel_vfdivs_vvvmvl __builtin_ve_vl_vfdivs_vvvmvl
+#define _vel_vfdivs_vsvmvl __builtin_ve_vl_vfdivs_vsvmvl
+#define _vel_vfsqrtd_vvl __builtin_ve_vl_vfsqrtd_vvl
+#define _vel_vfsqrtd_vvvl __builtin_ve_vl_vfsqrtd_vvvl
+#define _vel_vfsqrts_vvl __builtin_ve_vl_vfsqrts_vvl
+#define _vel_vfsqrts_vvvl __builtin_ve_vl_vfsqrts_vvvl
+#define _vel_vfcmpd_vvvl __builtin_ve_vl_vfcmpd_vvvl
+#define _vel_vfcmpd_vvvvl __builtin_ve_vl_vfcmpd_vvvvl
+#define _vel_vfcmpd_vsvl __builtin_ve_vl_vfcmpd_vsvl
+#define _vel_vfcmpd_vsvvl __builtin_ve_vl_vfcmpd_vsvvl
+#define _vel_vfcmpd_vvvmvl __builtin_ve_vl_vfcmpd_vvvmvl
+#define _vel_vfcmpd_vsvmvl __builtin_ve_vl_vfcmpd_vsvmvl
+#define _vel_vfcmps_vvvl __builtin_ve_vl_vfcmps_vvvl
+#define _vel_vfcmps_vvvvl __builtin_ve_vl_vfcmps_vvvvl
+#define _vel_vfcmps_vsvl __builtin_ve_vl_vfcmps_vsvl
+#define _vel_vfcmps_vsvvl __builtin_ve_vl_vfcmps_vsvvl
+#define _vel_vfcmps_vvvmvl __builtin_ve_vl_vfcmps_vvvmvl
+#define _vel_vfcmps_vsvmvl __builtin_ve_vl_vfcmps_vsvmvl
+#define _vel_pvfcmp_vvvl __builtin_ve_vl_pvfcmp_vvvl
+#define _vel_pvfcmp_vvvvl __builtin_ve_vl_pvfcmp_vvvvl
+#define _vel_pvfcmp_vsvl __builtin_ve_vl_pvfcmp_vsvl
+#define _vel_pvfcmp_vsvvl __builtin_ve_vl_pvfcmp_vsvvl
+#define _vel_pvfcmp_vvvMvl __builtin_ve_vl_pvfcmp_vvvMvl
+#define _vel_pvfcmp_vsvMvl __builtin_ve_vl_pvfcmp_vsvMvl
+#define _vel_vfmaxd_vvvl __builtin_ve_vl_vfmaxd_vvvl
+#define _vel_vfmaxd_vvvvl __builtin_ve_vl_vfmaxd_vvvvl
+#define _vel_vfmaxd_vsvl __builtin_ve_vl_vfmaxd_vsvl
+#define _vel_vfmaxd_vsvvl __builtin_ve_vl_vfmaxd_vsvvl
+#define _vel_vfmaxd_vvvmvl __builtin_ve_vl_vfmaxd_vvvmvl
+#define _vel_vfmaxd_vsvmvl __builtin_ve_vl_vfmaxd_vsvmvl
+#define _vel_vfmaxs_vvvl __builtin_ve_vl_vfmaxs_vvvl
+#define _vel_vfmaxs_vvvvl __builtin_ve_vl_vfmaxs_vvvvl
+#define _vel_vfmaxs_vsvl __builtin_ve_vl_vfmaxs_vsvl
+#define _vel_vfmaxs_vsvvl __builtin_ve_vl_vfmaxs_vsvvl
+#define _vel_vfmaxs_vvvmvl __builtin_ve_vl_vfmaxs_vvvmvl
+#define _vel_vfmaxs_vsvmvl __builtin_ve_vl_vfmaxs_vsvmvl
+#define _vel_pvfmax_vvvl __builtin_ve_vl_pvfmax_vvvl
+#define _vel_pvfmax_vvvvl __builtin_ve_vl_pvfmax_vvvvl
+#define _vel_pvfmax_vsvl __builtin_ve_vl_pvfmax_vsvl
+#define _vel_pvfmax_vsvvl __builtin_ve_vl_pvfmax_vsvvl
+#define _vel_pvfmax_vvvMvl __builtin_ve_vl_pvfmax_vvvMvl
+#define _vel_pvfmax_vsvMvl __builtin_ve_vl_pvfmax_vsvMvl
+#define _vel_vfmind_vvvl __builtin_ve_vl_vfmind_vvvl
+#define _vel_vfmind_vvvvl __builtin_ve_vl_vfmind_vvvvl
+#define _vel_vfmind_vsvl __builtin_ve_vl_vfmind_vsvl
+#define _vel_vfmind_vsvvl __builtin_ve_vl_vfmind_vsvvl
+#define _vel_vfmind_vvvmvl __builtin_ve_vl_vfmind_vvvmvl
+#define _vel_vfmind_vsvmvl __builtin_ve_vl_vfmind_vsvmvl
+#define _vel_vfmins_vvvl __builtin_ve_vl_vfmins_vvvl
+#define _vel_vfmins_vvvvl __builtin_ve_vl_vfmins_vvvvl
+#define _vel_vfmins_vsvl __builtin_ve_vl_vfmins_vsvl
+#define _vel_vfmins_vsvvl __builtin_ve_vl_vfmins_vsvvl
+#define _vel_vfmins_vvvmvl __builtin_ve_vl_vfmins_vvvmvl
+#define _vel_vfmins_vsvmvl __builtin_ve_vl_vfmins_vsvmvl
+#define _vel_pvfmin_vvvl __builtin_ve_vl_pvfmin_vvvl
+#define _vel_pvfmin_vvvvl __builtin_ve_vl_pvfmin_vvvvl
+#define _vel_pvfmin_vsvl __builtin_ve_vl_pvfmin_vsvl
+#define _vel_pvfmin_vsvvl __builtin_ve_vl_pvfmin_vsvvl
+#define _vel_pvfmin_vvvMvl __builtin_ve_vl_pvfmin_vvvMvl
+#define _vel_pvfmin_vsvMvl __builtin_ve_vl_pvfmin_vsvMvl
+#define _vel_vfmadd_vvvvl __builtin_ve_vl_vfmadd_vvvvl
+#define _vel_vfmadd_vvvvvl __builtin_ve_vl_vfmadd_vvvvvl
+#define _vel_vfmadd_vsvvl __builtin_ve_vl_vfmadd_vsvvl
+#define _vel_vfmadd_vsvvvl __builtin_ve_vl_vfmadd_vsvvvl
+#define _vel_vfmadd_vvsvl __builtin_ve_vl_vfmadd_vvsvl
+#define _vel_vfmadd_vvsvvl __builtin_ve_vl_vfmadd_vvsvvl
+#define _vel_vfmadd_vvvvmvl __builtin_ve_vl_vfmadd_vvvvmvl
+#define _vel_vfmadd_vsvvmvl __builtin_ve_vl_vfmadd_vsvvmvl
+#define _vel_vfmadd_vvsvmvl __builtin_ve_vl_vfmadd_vvsvmvl
+#define _vel_vfmads_vvvvl __builtin_ve_vl_vfmads_vvvvl
+#define _vel_vfmads_vvvvvl __builtin_ve_vl_vfmads_vvvvvl
+#define _vel_vfmads_vsvvl __builtin_ve_vl_vfmads_vsvvl
+#define _vel_vfmads_vsvvvl __builtin_ve_vl_vfmads_vsvvvl
+#define _vel_vfmads_vvsvl __builtin_ve_vl_vfmads_vvsvl
+#define _vel_vfmads_vvsvvl __builtin_ve_vl_vfmads_vvsvvl
+#define _vel_vfmads_vvvvmvl __builtin_ve_vl_vfmads_vvvvmvl
+#define _vel_vfmads_vsvvmvl __builtin_ve_vl_vfmads_vsvvmvl
+#define _vel_vfmads_vvsvmvl __builtin_ve_vl_vfmads_vvsvmvl
+#define _vel_pvfmad_vvvvl __builtin_ve_vl_pvfmad_vvvvl
+#define _vel_pvfmad_vvvvvl __builtin_ve_vl_pvfmad_vvvvvl
+#define _vel_pvfmad_vsvvl __builtin_ve_vl_pvfmad_vsvvl
+#define _vel_pvfmad_vsvvvl __builtin_ve_vl_pvfmad_vsvvvl
+#define _vel_pvfmad_vvsvl __builtin_ve_vl_pvfmad_vvsvl
+#define _vel_pvfmad_vvsvvl __builtin_ve_vl_pvfmad_vvsvvl
+#define _vel_pvfmad_vvvvMvl __builtin_ve_vl_pvfmad_vvvvMvl
+#define _vel_pvfmad_vsvvMvl __builtin_ve_vl_pvfmad_vsvvMvl
+#define _vel_pvfmad_vvsvMvl __builtin_ve_vl_pvfmad_vvsvMvl
+#define _vel_vfmsbd_vvvvl __builtin_ve_vl_vfmsbd_vvvvl
+#define _vel_vfmsbd_vvvvvl __builtin_ve_vl_vfmsbd_vvvvvl
+#define _vel_vfmsbd_vsvvl __builtin_ve_vl_vfmsbd_vsvvl
+#define _vel_vfmsbd_vsvvvl __builtin_ve_vl_vfmsbd_vsvvvl
+#define _vel_vfmsbd_vvsvl __builtin_ve_vl_vfmsbd_vvsvl
+#define _vel_vfmsbd_vvsvvl __builtin_ve_vl_vfmsbd_vvsvvl
+#define _vel_vfmsbd_vvvvmvl __builtin_ve_vl_vfmsbd_vvvvmvl
+#define _vel_vfmsbd_vsvvmvl __builtin_ve_vl_vfmsbd_vsvvmvl
+#define _vel_vfmsbd_vvsvmvl __builtin_ve_vl_vfmsbd_vvsvmvl
+#define _vel_vfmsbs_vvvvl __builtin_ve_vl_vfmsbs_vvvvl
+#define _vel_vfmsbs_vvvvvl __builtin_ve_vl_vfmsbs_vvvvvl
+#define _vel_vfmsbs_vsvvl __builtin_ve_vl_vfmsbs_vsvvl
+#define _vel_vfmsbs_vsvvvl __builtin_ve_vl_vfmsbs_vsvvvl
+#define _vel_vfmsbs_vvsvl __builtin_ve_vl_vfmsbs_vvsvl
+#define _vel_vfmsbs_vvsvvl __builtin_ve_vl_vfmsbs_vvsvvl
+#define _vel_vfmsbs_vvvvmvl __builtin_ve_vl_vfmsbs_vvvvmvl
+#define _vel_vfmsbs_vsvvmvl __builtin_ve_vl_vfmsbs_vsvvmvl
+#define _vel_vfmsbs_vvsvmvl __builtin_ve_vl_vfmsbs_vvsvmvl
+#define _vel_pvfmsb_vvvvl __builtin_ve_vl_pvfmsb_vvvvl
+#define _vel_pvfmsb_vvvvvl __builtin_ve_vl_pvfmsb_vvvvvl
+#define _vel_pvfmsb_vsvvl __builtin_ve_vl_pvfmsb_vsvvl
+#define _vel_pvfmsb_vsvvvl __builtin_ve_vl_pvfmsb_vsvvvl
+#define _vel_pvfmsb_vvsvl __builtin_ve_vl_pvfmsb_vvsvl
+#define _vel_pvfmsb_vvsvvl __builtin_ve_vl_pvfmsb_vvsvvl
+#define _vel_pvfmsb_vvvvMvl __builtin_ve_vl_pvfmsb_vvvvMvl
+#define _vel_pvfmsb_vsvvMvl __builtin_ve_vl_pvfmsb_vsvvMvl
+#define _vel_pvfmsb_vvsvMvl __builtin_ve_vl_pvfmsb_vvsvMvl
+#define _vel_vfnmadd_vvvvl __builtin_ve_vl_vfnmadd_vvvvl
+#define _vel_vfnmadd_vvvvvl __builtin_ve_vl_vfnmadd_vvvvvl
+#define _vel_vfnmadd_vsvvl __builtin_ve_vl_vfnmadd_vsvvl
+#define _vel_vfnmadd_vsvvvl __builtin_ve_vl_vfnmadd_vsvvvl
+#define _vel_vfnmadd_vvsvl __builtin_ve_vl_vfnmadd_vvsvl
+#define _vel_vfnmadd_vvsvvl __builtin_ve_vl_vfnmadd_vvsvvl
+#define _vel_vfnmadd_vvvvmvl __builtin_ve_vl_vfnmadd_vvvvmvl
+#define _vel_vfnmadd_vsvvmvl __builtin_ve_vl_vfnmadd_vsvvmvl
+#define _vel_vfnmadd_vvsvmvl __builtin_ve_vl_vfnmadd_vvsvmvl
+#define _vel_vfnmads_vvvvl __builtin_ve_vl_vfnmads_vvvvl
+#define _vel_vfnmads_vvvvvl __builtin_ve_vl_vfnmads_vvvvvl
+#define _vel_vfnmads_vsvvl __builtin_ve_vl_vfnmads_vsvvl
+#define _vel_vfnmads_vsvvvl __builtin_ve_vl_vfnmads_vsvvvl
+#define _vel_vfnmads_vvsvl __builtin_ve_vl_vfnmads_vvsvl
+#define _vel_vfnmads_vvsvvl __builtin_ve_vl_vfnmads_vvsvvl
+#define _vel_vfnmads_vvvvmvl __builtin_ve_vl_vfnmads_vvvvmvl
+#define _vel_vfnmads_vsvvmvl __builtin_ve_vl_vfnmads_vsvvmvl
+#define _vel_vfnmads_vvsvmvl __builtin_ve_vl_vfnmads_vvsvmvl
+#define _vel_pvfnmad_vvvvl __builtin_ve_vl_pvfnmad_vvvvl
+#define _vel_pvfnmad_vvvvvl __builtin_ve_vl_pvfnmad_vvvvvl
+#define _vel_pvfnmad_vsvvl __builtin_ve_vl_pvfnmad_vsvvl
+#define _vel_pvfnmad_vsvvvl __builtin_ve_vl_pvfnmad_vsvvvl
+#define _vel_pvfnmad_vvsvl __builtin_ve_vl_pvfnmad_vvsvl
+#define _vel_pvfnmad_vvsvvl __builtin_ve_vl_pvfnmad_vvsvvl
+#define _vel_pvfnmad_vvvvMvl __builtin_ve_vl_pvfnmad_vvvvMvl
+#define _vel_pvfnmad_vsvvMvl __builtin_ve_vl_pvfnmad_vsvvMvl
+#define _vel_pvfnmad_vvsvMvl __builtin_ve_vl_pvfnmad_vvsvMvl
+#define _vel_vfnmsbd_vvvvl __builtin_ve_vl_vfnmsbd_vvvvl
+#define _vel_vfnmsbd_vvvvvl __builtin_ve_vl_vfnmsbd_vvvvvl
+#define _vel_vfnmsbd_vsvvl __builtin_ve_vl_vfnmsbd_vsvvl
+#define _vel_vfnmsbd_vsvvvl __builtin_ve_vl_vfnmsbd_vsvvvl
+#define _vel_vfnmsbd_vvsvl __builtin_ve_vl_vfnmsbd_vvsvl
+#define _vel_vfnmsbd_vvsvvl __builtin_ve_vl_vfnmsbd_vvsvvl
+#define _vel_vfnmsbd_vvvvmvl __builtin_ve_vl_vfnmsbd_vvvvmvl
+#define _vel_vfnmsbd_vsvvmvl __builtin_ve_vl_vfnmsbd_vsvvmvl
+#define _vel_vfnmsbd_vvsvmvl __builtin_ve_vl_vfnmsbd_vvsvmvl
+#define _vel_vfnmsbs_vvvvl __builtin_ve_vl_vfnmsbs_vvvvl
+#define _vel_vfnmsbs_vvvvvl __builtin_ve_vl_vfnmsbs_vvvvvl
+#define _vel_vfnmsbs_vsvvl __builtin_ve_vl_vfnmsbs_vsvvl
+#define _vel_vfnmsbs_vsvvvl __builtin_ve_vl_vfnmsbs_vsvvvl
+#define _vel_vfnmsbs_vvsvl __builtin_ve_vl_vfnmsbs_vvsvl
+#define _vel_vfnmsbs_vvsvvl __builtin_ve_vl_vfnmsbs_vvsvvl
+#define _vel_vfnmsbs_vvvvmvl __builtin_ve_vl_vfnmsbs_vvvvmvl
+#define _vel_vfnmsbs_vsvvmvl __builtin_ve_vl_vfnmsbs_vsvvmvl
+#define _vel_vfnmsbs_vvsvmvl __builtin_ve_vl_vfnmsbs_vvsvmvl
+#define _vel_pvfnmsb_vvvvl __builtin_ve_vl_pvfnmsb_vvvvl
+#define _vel_pvfnmsb_vvvvvl __builtin_ve_vl_pvfnmsb_vvvvvl
+#define _vel_pvfnmsb_vsvvl __builtin_ve_vl_pvfnmsb_vsvvl
+#define _vel_pvfnmsb_vsvvvl __builtin_ve_vl_pvfnmsb_vsvvvl
+#define _vel_pvfnmsb_vvsvl __builtin_ve_vl_pvfnmsb_vvsvl
+#define _vel_pvfnmsb_vvsvvl __builtin_ve_vl_pvfnmsb_vvsvvl
+#define _vel_pvfnmsb_vvvvMvl __builtin_ve_vl_pvfnmsb_vvvvMvl
+#define _vel_pvfnmsb_vsvvMvl __builtin_ve_vl_pvfnmsb_vsvvMvl
+#define _vel_pvfnmsb_vvsvMvl __builtin_ve_vl_pvfnmsb_vvsvMvl
+#define _vel_vrcpd_vvl __builtin_ve_vl_vrcpd_vvl
+#define _vel_vrcpd_vvvl __builtin_ve_vl_vrcpd_vvvl
+#define _vel_vrcps_vvl __builtin_ve_vl_vrcps_vvl
+#define _vel_vrcps_vvvl __builtin_ve_vl_vrcps_vvvl
+#define _vel_pvrcp_vvl __builtin_ve_vl_pvrcp_vvl
+#define _vel_pvrcp_vvvl __builtin_ve_vl_pvrcp_vvvl
+#define _vel_vrsqrtd_vvl __builtin_ve_vl_vrsqrtd_vvl
+#define _vel_vrsqrtd_vvvl __builtin_ve_vl_vrsqrtd_vvvl
+#define _vel_vrsqrts_vvl __builtin_ve_vl_vrsqrts_vvl
+#define _vel_vrsqrts_vvvl __builtin_ve_vl_vrsqrts_vvvl
+#define _vel_pvrsqrt_vvl __builtin_ve_vl_pvrsqrt_vvl
+#define _vel_pvrsqrt_vvvl __builtin_ve_vl_pvrsqrt_vvvl
+#define _vel_vrsqrtdnex_vvl __builtin_ve_vl_vrsqrtdnex_vvl
+#define _vel_vrsqrtdnex_vvvl __builtin_ve_vl_vrsqrtdnex_vvvl
+#define _vel_vrsqrtsnex_vvl __builtin_ve_vl_vrsqrtsnex_vvl
+#define _vel_vrsqrtsnex_vvvl __builtin_ve_vl_vrsqrtsnex_vvvl
+#define _vel_pvrsqrtnex_vvl __builtin_ve_vl_pvrsqrtnex_vvl
+#define _vel_pvrsqrtnex_vvvl __builtin_ve_vl_pvrsqrtnex_vvvl
+#define _vel_vcvtwdsx_vvl __builtin_ve_vl_vcvtwdsx_vvl
+#define _vel_vcvtwdsx_vvvl __builtin_ve_vl_vcvtwdsx_vvvl
+#define _vel_vcvtwdsx_vvmvl __builtin_ve_vl_vcvtwdsx_vvmvl
+#define _vel_vcvtwdsxrz_vvl __builtin_ve_vl_vcvtwdsxrz_vvl
+#define _vel_vcvtwdsxrz_vvvl __builtin_ve_vl_vcvtwdsxrz_vvvl
+#define _vel_vcvtwdsxrz_vvmvl __builtin_ve_vl_vcvtwdsxrz_vvmvl
+#define _vel_vcvtwdzx_vvl __builtin_ve_vl_vcvtwdzx_vvl
+#define _vel_vcvtwdzx_vvvl __builtin_ve_vl_vcvtwdzx_vvvl
+#define _vel_vcvtwdzx_vvmvl __builtin_ve_vl_vcvtwdzx_vvmvl
+#define _vel_vcvtwdzxrz_vvl __builtin_ve_vl_vcvtwdzxrz_vvl
+#define _vel_vcvtwdzxrz_vvvl __builtin_ve_vl_vcvtwdzxrz_vvvl
+#define _vel_vcvtwdzxrz_vvmvl __builtin_ve_vl_vcvtwdzxrz_vvmvl
+#define _vel_vcvtwssx_vvl __builtin_ve_vl_vcvtwssx_vvl
+#define _vel_vcvtwssx_vvvl __builtin_ve_vl_vcvtwssx_vvvl
+#define _vel_vcvtwssx_vvmvl __builtin_ve_vl_vcvtwssx_vvmvl
+#define _vel_vcvtwssxrz_vvl __builtin_ve_vl_vcvtwssxrz_vvl
+#define _vel_vcvtwssxrz_vvvl __builtin_ve_vl_vcvtwssxrz_vvvl
+#define _vel_vcvtwssxrz_vvmvl __builtin_ve_vl_vcvtwssxrz_vvmvl
+#define _vel_vcvtwszx_vvl __builtin_ve_vl_vcvtwszx_vvl
+#define _vel_vcvtwszx_vvvl __builtin_ve_vl_vcvtwszx_vvvl
+#define _vel_vcvtwszx_vvmvl __builtin_ve_vl_vcvtwszx_vvmvl
+#define _vel_vcvtwszxrz_vvl __builtin_ve_vl_vcvtwszxrz_vvl
+#define _vel_vcvtwszxrz_vvvl __builtin_ve_vl_vcvtwszxrz_vvvl
+#define _vel_vcvtwszxrz_vvmvl __builtin_ve_vl_vcvtwszxrz_vvmvl
+#define _vel_pvcvtws_vvl __builtin_ve_vl_pvcvtws_vvl
+#define _vel_pvcvtws_vvvl __builtin_ve_vl_pvcvtws_vvvl
+#define _vel_pvcvtws_vvMvl __builtin_ve_vl_pvcvtws_vvMvl
+#define _vel_pvcvtwsrz_vvl __builtin_ve_vl_pvcvtwsrz_vvl
+#define _vel_pvcvtwsrz_vvvl __builtin_ve_vl_pvcvtwsrz_vvvl
+#define _vel_pvcvtwsrz_vvMvl __builtin_ve_vl_pvcvtwsrz_vvMvl
+#define _vel_vcvtld_vvl __builtin_ve_vl_vcvtld_vvl
+#define _vel_vcvtld_vvvl __builtin_ve_vl_vcvtld_vvvl
+#define _vel_vcvtld_vvmvl __builtin_ve_vl_vcvtld_vvmvl
+#define _vel_vcvtldrz_vvl __builtin_ve_vl_vcvtldrz_vvl
+#define _vel_vcvtldrz_vvvl __builtin_ve_vl_vcvtldrz_vvvl
+#define _vel_vcvtldrz_vvmvl __builtin_ve_vl_vcvtldrz_vvmvl
+#define _vel_vcvtdw_vvl __builtin_ve_vl_vcvtdw_vvl
+#define _vel_vcvtdw_vvvl __builtin_ve_vl_vcvtdw_vvvl
+#define _vel_vcvtsw_vvl __builtin_ve_vl_vcvtsw_vvl
+#define _vel_vcvtsw_vvvl __builtin_ve_vl_vcvtsw_vvvl
+#define _vel_pvcvtsw_vvl __builtin_ve_vl_pvcvtsw_vvl
+#define _vel_pvcvtsw_vvvl __builtin_ve_vl_pvcvtsw_vvvl
+#define _vel_vcvtdl_vvl __builtin_ve_vl_vcvtdl_vvl
+#define _vel_vcvtdl_vvvl __builtin_ve_vl_vcvtdl_vvvl
+#define _vel_vcvtds_vvl __builtin_ve_vl_vcvtds_vvl
+#define _vel_vcvtds_vvvl __builtin_ve_vl_vcvtds_vvvl
+#define _vel_vcvtsd_vvl __builtin_ve_vl_vcvtsd_vvl
+#define _vel_vcvtsd_vvvl __builtin_ve_vl_vcvtsd_vvvl
+#define _vel_vmrg_vvvml __builtin_ve_vl_vmrg_vvvml
+#define _vel_vmrg_vvvmvl __builtin_ve_vl_vmrg_vvvmvl
+#define _vel_vmrg_vsvml __builtin_ve_vl_vmrg_vsvml
+#define _vel_vmrg_vsvmvl __builtin_ve_vl_vmrg_vsvmvl
+#define _vel_vmrgw_vvvMl __builtin_ve_vl_vmrgw_vvvMl
+#define _vel_vmrgw_vvvMvl __builtin_ve_vl_vmrgw_vvvMvl
+#define _vel_vmrgw_vsvMl __builtin_ve_vl_vmrgw_vsvMl
+#define _vel_vmrgw_vsvMvl __builtin_ve_vl_vmrgw_vsvMvl
+#define _vel_vshf_vvvsl __builtin_ve_vl_vshf_vvvsl
+#define _vel_vshf_vvvsvl __builtin_ve_vl_vshf_vvvsvl
+#define _vel_vcp_vvmvl __builtin_ve_vl_vcp_vvmvl
+#define _vel_vex_vvmvl __builtin_ve_vl_vex_vvmvl
+#define _vel_vfmklat_ml __builtin_ve_vl_vfmklat_ml
+#define _vel_vfmklaf_ml __builtin_ve_vl_vfmklaf_ml
+#define _vel_pvfmkat_Ml __builtin_ve_vl_pvfmkat_Ml
+#define _vel_pvfmkaf_Ml __builtin_ve_vl_pvfmkaf_Ml
+#define _vel_vfmklgt_mvl __builtin_ve_vl_vfmklgt_mvl
+#define _vel_vfmklgt_mvml __builtin_ve_vl_vfmklgt_mvml
+#define _vel_vfmkllt_mvl __builtin_ve_vl_vfmkllt_mvl
+#define _vel_vfmkllt_mvml __builtin_ve_vl_vfmkllt_mvml
+#define _vel_vfmklne_mvl __builtin_ve_vl_vfmklne_mvl
+#define _vel_vfmklne_mvml __builtin_ve_vl_vfmklne_mvml
+#define _vel_vfmkleq_mvl __builtin_ve_vl_vfmkleq_mvl
+#define _vel_vfmkleq_mvml __builtin_ve_vl_vfmkleq_mvml
+#define _vel_vfmklge_mvl __builtin_ve_vl_vfmklge_mvl
+#define _vel_vfmklge_mvml __builtin_ve_vl_vfmklge_mvml
+#define _vel_vfmklle_mvl __builtin_ve_vl_vfmklle_mvl
+#define _vel_vfmklle_mvml __builtin_ve_vl_vfmklle_mvml
+#define _vel_vfmklnum_mvl __builtin_ve_vl_vfmklnum_mvl
+#define _vel_vfmklnum_mvml __builtin_ve_vl_vfmklnum_mvml
+#define _vel_vfmklnan_mvl __builtin_ve_vl_vfmklnan_mvl
+#define _vel_vfmklnan_mvml __builtin_ve_vl_vfmklnan_mvml
+#define _vel_vfmklgtnan_mvl __builtin_ve_vl_vfmklgtnan_mvl
+#define _vel_vfmklgtnan_mvml __builtin_ve_vl_vfmklgtnan_mvml
+#define _vel_vfmklltnan_mvl __builtin_ve_vl_vfmklltnan_mvl
+#define _vel_vfmklltnan_mvml __builtin_ve_vl_vfmklltnan_mvml
+#define _vel_vfmklnenan_mvl __builtin_ve_vl_vfmklnenan_mvl
+#define _vel_vfmklnenan_mvml __builtin_ve_vl_vfmklnenan_mvml
+#define _vel_vfmkleqnan_mvl __builtin_ve_vl_vfmkleqnan_mvl
+#define _vel_vfmkleqnan_mvml __builtin_ve_vl_vfmkleqnan_mvml
+#define _vel_vfmklgenan_mvl __builtin_ve_vl_vfmklgenan_mvl
+#define _vel_vfmklgenan_mvml __builtin_ve_vl_vfmklgenan_mvml
+#define _vel_vfmkllenan_mvl __builtin_ve_vl_vfmkllenan_mvl
+#define _vel_vfmkllenan_mvml __builtin_ve_vl_vfmkllenan_mvml
+#define _vel_vfmkwgt_mvl __builtin_ve_vl_vfmkwgt_mvl
+#define _vel_vfmkwgt_mvml __builtin_ve_vl_vfmkwgt_mvml
+#define _vel_vfmkwlt_mvl __builtin_ve_vl_vfmkwlt_mvl
+#define _vel_vfmkwlt_mvml __builtin_ve_vl_vfmkwlt_mvml
+#define _vel_vfmkwne_mvl __builtin_ve_vl_vfmkwne_mvl
+#define _vel_vfmkwne_mvml __builtin_ve_vl_vfmkwne_mvml
+#define _vel_vfmkweq_mvl __builtin_ve_vl_vfmkweq_mvl
+#define _vel_vfmkweq_mvml __builtin_ve_vl_vfmkweq_mvml
+#define _vel_vfmkwge_mvl __builtin_ve_vl_vfmkwge_mvl
+#define _vel_vfmkwge_mvml __builtin_ve_vl_vfmkwge_mvml
+#define _vel_vfmkwle_mvl __builtin_ve_vl_vfmkwle_mvl
+#define _vel_vfmkwle_mvml __builtin_ve_vl_vfmkwle_mvml
+#define _vel_vfmkwnum_mvl __builtin_ve_vl_vfmkwnum_mvl
+#define _vel_vfmkwnum_mvml __builtin_ve_vl_vfmkwnum_mvml
+#define _vel_vfmkwnan_mvl __builtin_ve_vl_vfmkwnan_mvl
+#define _vel_vfmkwnan_mvml __builtin_ve_vl_vfmkwnan_mvml
+#define _vel_vfmkwgtnan_mvl __builtin_ve_vl_vfmkwgtnan_mvl
+#define _vel_vfmkwgtnan_mvml __builtin_ve_vl_vfmkwgtnan_mvml
+#define _vel_vfmkwltnan_mvl __builtin_ve_vl_vfmkwltnan_mvl
+#define _vel_vfmkwltnan_mvml __builtin_ve_vl_vfmkwltnan_mvml
+#define _vel_vfmkwnenan_mvl __builtin_ve_vl_vfmkwnenan_mvl
+#define _vel_vfmkwnenan_mvml __builtin_ve_vl_vfmkwnenan_mvml
+#define _vel_vfmkweqnan_mvl __builtin_ve_vl_vfmkweqnan_mvl
+#define _vel_vfmkweqnan_mvml __builtin_ve_vl_vfmkweqnan_mvml
+#define _vel_vfmkwgenan_mvl __builtin_ve_vl_vfmkwgenan_mvl
+#define _vel_vfmkwgenan_mvml __builtin_ve_vl_vfmkwgenan_mvml
+#define _vel_vfmkwlenan_mvl __builtin_ve_vl_vfmkwlenan_mvl
+#define _vel_vfmkwlenan_mvml __builtin_ve_vl_vfmkwlenan_mvml
+#define _vel_pvfmkwlogt_mvl __builtin_ve_vl_pvfmkwlogt_mvl
+#define _vel_pvfmkwupgt_mvl __builtin_ve_vl_pvfmkwupgt_mvl
+#define _vel_pvfmkwlogt_mvml __builtin_ve_vl_pvfmkwlogt_mvml
+#define _vel_pvfmkwupgt_mvml __builtin_ve_vl_pvfmkwupgt_mvml
+#define _vel_pvfmkwlolt_mvl __builtin_ve_vl_pvfmkwlolt_mvl
+#define _vel_pvfmkwuplt_mvl __builtin_ve_vl_pvfmkwuplt_mvl
+#define _vel_pvfmkwlolt_mvml __builtin_ve_vl_pvfmkwlolt_mvml
+#define _vel_pvfmkwuplt_mvml __builtin_ve_vl_pvfmkwuplt_mvml
+#define _vel_pvfmkwlone_mvl __builtin_ve_vl_pvfmkwlone_mvl
+#define _vel_pvfmkwupne_mvl __builtin_ve_vl_pvfmkwupne_mvl
+#define _vel_pvfmkwlone_mvml __builtin_ve_vl_pvfmkwlone_mvml
+#define _vel_pvfmkwupne_mvml __builtin_ve_vl_pvfmkwupne_mvml
+#define _vel_pvfmkwloeq_mvl __builtin_ve_vl_pvfmkwloeq_mvl
+#define _vel_pvfmkwupeq_mvl __builtin_ve_vl_pvfmkwupeq_mvl
+#define _vel_pvfmkwloeq_mvml __builtin_ve_vl_pvfmkwloeq_mvml
+#define _vel_pvfmkwupeq_mvml __builtin_ve_vl_pvfmkwupeq_mvml
+#define _vel_pvfmkwloge_mvl __builtin_ve_vl_pvfmkwloge_mvl
+#define _vel_pvfmkwupge_mvl __builtin_ve_vl_pvfmkwupge_mvl
+#define _vel_pvfmkwloge_mvml __builtin_ve_vl_pvfmkwloge_mvml
+#define _vel_pvfmkwupge_mvml __builtin_ve_vl_pvfmkwupge_mvml
+#define _vel_pvfmkwlole_mvl __builtin_ve_vl_pvfmkwlole_mvl
+#define _vel_pvfmkwuple_mvl __builtin_ve_vl_pvfmkwuple_mvl
+#define _vel_pvfmkwlole_mvml __builtin_ve_vl_pvfmkwlole_mvml
+#define _vel_pvfmkwuple_mvml __builtin_ve_vl_pvfmkwuple_mvml
+#define _vel_pvfmkwlonum_mvl __builtin_ve_vl_pvfmkwlonum_mvl
+#define _vel_pvfmkwupnum_mvl __builtin_ve_vl_pvfmkwupnum_mvl
+#define _vel_pvfmkwlonum_mvml __builtin_ve_vl_pvfmkwlonum_mvml
+#define _vel_pvfmkwupnum_mvml __builtin_ve_vl_pvfmkwupnum_mvml
+#define _vel_pvfmkwlonan_mvl __builtin_ve_vl_pvfmkwlonan_mvl
+#define _vel_pvfmkwupnan_mvl __builtin_ve_vl_pvfmkwupnan_mvl
+#define _vel_pvfmkwlonan_mvml __builtin_ve_vl_pvfmkwlonan_mvml
+#define _vel_pvfmkwupnan_mvml __builtin_ve_vl_pvfmkwupnan_mvml
+#define _vel_pvfmkwlogtnan_mvl __builtin_ve_vl_pvfmkwlogtnan_mvl
+#define _vel_pvfmkwupgtnan_mvl __builtin_ve_vl_pvfmkwupgtnan_mvl
+#define _vel_pvfmkwlogtnan_mvml __builtin_ve_vl_pvfmkwlogtnan_mvml
+#define _vel_pvfmkwupgtnan_mvml __builtin_ve_vl_pvfmkwupgtnan_mvml
+#define _vel_pvfmkwloltnan_mvl __builtin_ve_vl_pvfmkwloltnan_mvl
+#define _vel_pvfmkwupltnan_mvl __builtin_ve_vl_pvfmkwupltnan_mvl
+#define _vel_pvfmkwloltnan_mvml __builtin_ve_vl_pvfmkwloltnan_mvml
+#define _vel_pvfmkwupltnan_mvml __builtin_ve_vl_pvfmkwupltnan_mvml
+#define _vel_pvfmkwlonenan_mvl __builtin_ve_vl_pvfmkwlonenan_mvl
+#define _vel_pvfmkwupnenan_mvl __builtin_ve_vl_pvfmkwupnenan_mvl
+#define _vel_pvfmkwlonenan_mvml __builtin_ve_vl_pvfmkwlonenan_mvml
+#define _vel_pvfmkwupnenan_mvml __builtin_ve_vl_pvfmkwupnenan_mvml
+#define _vel_pvfmkwloeqnan_mvl __builtin_ve_vl_pvfmkwloeqnan_mvl
+#define _vel_pvfmkwupeqnan_mvl __builtin_ve_vl_pvfmkwupeqnan_mvl
+#define _vel_pvfmkwloeqnan_mvml __builtin_ve_vl_pvfmkwloeqnan_mvml
+#define _vel_pvfmkwupeqnan_mvml __builtin_ve_vl_pvfmkwupeqnan_mvml
+#define _vel_pvfmkwlogenan_mvl __builtin_ve_vl_pvfmkwlogenan_mvl
+#define _vel_pvfmkwupgenan_mvl __builtin_ve_vl_pvfmkwupgenan_mvl
+#define _vel_pvfmkwlogenan_mvml __builtin_ve_vl_pvfmkwlogenan_mvml
+#define _vel_pvfmkwupgenan_mvml __builtin_ve_vl_pvfmkwupgenan_mvml
+#define _vel_pvfmkwlolenan_mvl __builtin_ve_vl_pvfmkwlolenan_mvl
+#define _vel_pvfmkwuplenan_mvl __builtin_ve_vl_pvfmkwuplenan_mvl
+#define _vel_pvfmkwlolenan_mvml __builtin_ve_vl_pvfmkwlolenan_mvml
+#define _vel_pvfmkwuplenan_mvml __builtin_ve_vl_pvfmkwuplenan_mvml
+#define _vel_pvfmkwgt_Mvl __builtin_ve_vl_pvfmkwgt_Mvl
+#define _vel_pvfmkwgt_MvMl __builtin_ve_vl_pvfmkwgt_MvMl
+#define _vel_pvfmkwlt_Mvl __builtin_ve_vl_pvfmkwlt_Mvl
+#define _vel_pvfmkwlt_MvMl __builtin_ve_vl_pvfmkwlt_MvMl
+#define _vel_pvfmkwne_Mvl __builtin_ve_vl_pvfmkwne_Mvl
+#define _vel_pvfmkwne_MvMl __builtin_ve_vl_pvfmkwne_MvMl
+#define _vel_pvfmkweq_Mvl __builtin_ve_vl_pvfmkweq_Mvl
+#define _vel_pvfmkweq_MvMl __builtin_ve_vl_pvfmkweq_MvMl
+#define _vel_pvfmkwge_Mvl __builtin_ve_vl_pvfmkwge_Mvl
+#define _vel_pvfmkwge_MvMl __builtin_ve_vl_pvfmkwge_MvMl
+#define _vel_pvfmkwle_Mvl __builtin_ve_vl_pvfmkwle_Mvl
+#define _vel_pvfmkwle_MvMl __builtin_ve_vl_pvfmkwle_MvMl
+#define _vel_pvfmkwnum_Mvl __builtin_ve_vl_pvfmkwnum_Mvl
+#define _vel_pvfmkwnum_MvMl __builtin_ve_vl_pvfmkwnum_MvMl
+#define _vel_pvfmkwnan_Mvl __builtin_ve_vl_pvfmkwnan_Mvl
+#define _vel_pvfmkwnan_MvMl __builtin_ve_vl_pvfmkwnan_MvMl
+#define _vel_pvfmkwgtnan_Mvl __builtin_ve_vl_pvfmkwgtnan_Mvl
+#define _vel_pvfmkwgtnan_MvMl __builtin_ve_vl_pvfmkwgtnan_MvMl
+#define _vel_pvfmkwltnan_Mvl __builtin_ve_vl_pvfmkwltnan_Mvl
+#define _vel_pvfmkwltnan_MvMl __builtin_ve_vl_pvfmkwltnan_MvMl
+#define _vel_pvfmkwnenan_Mvl __builtin_ve_vl_pvfmkwnenan_Mvl
+#define _vel_pvfmkwnenan_MvMl __builtin_ve_vl_pvfmkwnenan_MvMl
+#define _vel_pvfmkweqnan_Mvl __builtin_ve_vl_pvfmkweqnan_Mvl
+#define _vel_pvfmkweqnan_MvMl __builtin_ve_vl_pvfmkweqnan_MvMl
+#define _vel_pvfmkwgenan_Mvl __builtin_ve_vl_pvfmkwgenan_Mvl
+#define _vel_pvfmkwgenan_MvMl __builtin_ve_vl_pvfmkwgenan_MvMl
+#define _vel_pvfmkwlenan_Mvl __builtin_ve_vl_pvfmkwlenan_Mvl
+#define _vel_pvfmkwlenan_MvMl __builtin_ve_vl_pvfmkwlenan_MvMl
+#define _vel_vfmkdgt_mvl __builtin_ve_vl_vfmkdgt_mvl
+#define _vel_vfmkdgt_mvml __builtin_ve_vl_vfmkdgt_mvml
+#define _vel_vfmkdlt_mvl __builtin_ve_vl_vfmkdlt_mvl
+#define _vel_vfmkdlt_mvml __builtin_ve_vl_vfmkdlt_mvml
+#define _vel_vfmkdne_mvl __builtin_ve_vl_vfmkdne_mvl
+#define _vel_vfmkdne_mvml __builtin_ve_vl_vfmkdne_mvml
+#define _vel_vfmkdeq_mvl __builtin_ve_vl_vfmkdeq_mvl
+#define _vel_vfmkdeq_mvml __builtin_ve_vl_vfmkdeq_mvml
+#define _vel_vfmkdge_mvl __builtin_ve_vl_vfmkdge_mvl
+#define _vel_vfmkdge_mvml __builtin_ve_vl_vfmkdge_mvml
+#define _vel_vfmkdle_mvl __builtin_ve_vl_vfmkdle_mvl
+#define _vel_vfmkdle_mvml __builtin_ve_vl_vfmkdle_mvml
+#define _vel_vfmkdnum_mvl __builtin_ve_vl_vfmkdnum_mvl
+#define _vel_vfmkdnum_mvml __builtin_ve_vl_vfmkdnum_mvml
+#define _vel_vfmkdnan_mvl __builtin_ve_vl_vfmkdnan_mvl
+#define _vel_vfmkdnan_mvml __builtin_ve_vl_vfmkdnan_mvml
+#define _vel_vfmkdgtnan_mvl __builtin_ve_vl_vfmkdgtnan_mvl
+#define _vel_vfmkdgtnan_mvml __builtin_ve_vl_vfmkdgtnan_mvml
+#define _vel_vfmkdltnan_mvl __builtin_ve_vl_vfmkdltnan_mvl
+#define _vel_vfmkdltnan_mvml __builtin_ve_vl_vfmkdltnan_mvml
+#define _vel_vfmkdnenan_mvl __builtin_ve_vl_vfmkdnenan_mvl
+#define _vel_vfmkdnenan_mvml __builtin_ve_vl_vfmkdnenan_mvml
+#define _vel_vfmkdeqnan_mvl __builtin_ve_vl_vfmkdeqnan_mvl
+#define _vel_vfmkdeqnan_mvml __builtin_ve_vl_vfmkdeqnan_mvml
+#define _vel_vfmkdgenan_mvl __builtin_ve_vl_vfmkdgenan_mvl
+#define _vel_vfmkdgenan_mvml __builtin_ve_vl_vfmkdgenan_mvml
+#define _vel_vfmkdlenan_mvl __builtin_ve_vl_vfmkdlenan_mvl
+#define _vel_vfmkdlenan_mvml __builtin_ve_vl_vfmkdlenan_mvml
+#define _vel_vfmksgt_mvl __builtin_ve_vl_vfmksgt_mvl
+#define _vel_vfmksgt_mvml __builtin_ve_vl_vfmksgt_mvml
+#define _vel_vfmkslt_mvl __builtin_ve_vl_vfmkslt_mvl
+#define _vel_vfmkslt_mvml __builtin_ve_vl_vfmkslt_mvml
+#define _vel_vfmksne_mvl __builtin_ve_vl_vfmksne_mvl
+#define _vel_vfmksne_mvml __builtin_ve_vl_vfmksne_mvml
+#define _vel_vfmkseq_mvl __builtin_ve_vl_vfmkseq_mvl
+#define _vel_vfmkseq_mvml __builtin_ve_vl_vfmkseq_mvml
+#define _vel_vfmksge_mvl __builtin_ve_vl_vfmksge_mvl
+#define _vel_vfmksge_mvml __builtin_ve_vl_vfmksge_mvml
+#define _vel_vfmksle_mvl __builtin_ve_vl_vfmksle_mvl
+#define _vel_vfmksle_mvml __builtin_ve_vl_vfmksle_mvml
+#define _vel_vfmksnum_mvl __builtin_ve_vl_vfmksnum_mvl
+#define _vel_vfmksnum_mvml __builtin_ve_vl_vfmksnum_mvml
+#define _vel_vfmksnan_mvl __builtin_ve_vl_vfmksnan_mvl
+#define _vel_vfmksnan_mvml __builtin_ve_vl_vfmksnan_mvml
+#define _vel_vfmksgtnan_mvl __builtin_ve_vl_vfmksgtnan_mvl
+#define _vel_vfmksgtnan_mvml __builtin_ve_vl_vfmksgtnan_mvml
+#define _vel_vfmksltnan_mvl __builtin_ve_vl_vfmksltnan_mvl
+#define _vel_vfmksltnan_mvml __builtin_ve_vl_vfmksltnan_mvml
+#define _vel_vfmksnenan_mvl __builtin_ve_vl_vfmksnenan_mvl
+#define _vel_vfmksnenan_mvml __builtin_ve_vl_vfmksnenan_mvml
+#define _vel_vfmkseqnan_mvl __builtin_ve_vl_vfmkseqnan_mvl
+#define _vel_vfmkseqnan_mvml __builtin_ve_vl_vfmkseqnan_mvml
+#define _vel_vfmksgenan_mvl __builtin_ve_vl_vfmksgenan_mvl
+#define _vel_vfmksgenan_mvml __builtin_ve_vl_vfmksgenan_mvml
+#define _vel_vfmkslenan_mvl __builtin_ve_vl_vfmkslenan_mvl
+#define _vel_vfmkslenan_mvml __builtin_ve_vl_vfmkslenan_mvml
+#define _vel_pvfmkslogt_mvl __builtin_ve_vl_pvfmkslogt_mvl
+#define _vel_pvfmksupgt_mvl __builtin_ve_vl_pvfmksupgt_mvl
+#define _vel_pvfmkslogt_mvml __builtin_ve_vl_pvfmkslogt_mvml
+#define _vel_pvfmksupgt_mvml __builtin_ve_vl_pvfmksupgt_mvml
+#define _vel_pvfmkslolt_mvl __builtin_ve_vl_pvfmkslolt_mvl
+#define _vel_pvfmksuplt_mvl __builtin_ve_vl_pvfmksuplt_mvl
+#define _vel_pvfmkslolt_mvml __builtin_ve_vl_pvfmkslolt_mvml
+#define _vel_pvfmksuplt_mvml __builtin_ve_vl_pvfmksuplt_mvml
+#define _vel_pvfmkslone_mvl __builtin_ve_vl_pvfmkslone_mvl
+#define _vel_pvfmksupne_mvl __builtin_ve_vl_pvfmksupne_mvl
+#define _vel_pvfmkslone_mvml __builtin_ve_vl_pvfmkslone_mvml
+#define _vel_pvfmksupne_mvml __builtin_ve_vl_pvfmksupne_mvml
+#define _vel_pvfmksloeq_mvl __builtin_ve_vl_pvfmksloeq_mvl
+#define _vel_pvfmksupeq_mvl __builtin_ve_vl_pvfmksupeq_mvl
+#define _vel_pvfmksloeq_mvml __builtin_ve_vl_pvfmksloeq_mvml
+#define _vel_pvfmksupeq_mvml __builtin_ve_vl_pvfmksupeq_mvml
+#define _vel_pvfmksloge_mvl __builtin_ve_vl_pvfmksloge_mvl
+#define _vel_pvfmksupge_mvl __builtin_ve_vl_pvfmksupge_mvl
+#define _vel_pvfmksloge_mvml __builtin_ve_vl_pvfmksloge_mvml
+#define _vel_pvfmksupge_mvml __builtin_ve_vl_pvfmksupge_mvml
+#define _vel_pvfmkslole_mvl __builtin_ve_vl_pvfmkslole_mvl
+#define _vel_pvfmksuple_mvl __builtin_ve_vl_pvfmksuple_mvl
+#define _vel_pvfmkslole_mvml __builtin_ve_vl_pvfmkslole_mvml
+#define _vel_pvfmksuple_mvml __builtin_ve_vl_pvfmksuple_mvml
+#define _vel_pvfmkslonum_mvl __builtin_ve_vl_pvfmkslonum_mvl
+#define _vel_pvfmksupnum_mvl __builtin_ve_vl_pvfmksupnum_mvl
+#define _vel_pvfmkslonum_mvml __builtin_ve_vl_pvfmkslonum_mvml
+#define _vel_pvfmksupnum_mvml __builtin_ve_vl_pvfmksupnum_mvml
+#define _vel_pvfmkslonan_mvl __builtin_ve_vl_pvfmkslonan_mvl
+#define _vel_pvfmksupnan_mvl __builtin_ve_vl_pvfmksupnan_mvl
+#define _vel_pvfmkslonan_mvml __builtin_ve_vl_pvfmkslonan_mvml
+#define _vel_pvfmksupnan_mvml __builtin_ve_vl_pvfmksupnan_mvml
+#define _vel_pvfmkslogtnan_mvl __builtin_ve_vl_pvfmkslogtnan_mvl
+#define _vel_pvfmksupgtnan_mvl __builtin_ve_vl_pvfmksupgtnan_mvl
+#define _vel_pvfmkslogtnan_mvml __builtin_ve_vl_pvfmkslogtnan_mvml
+#define _vel_pvfmksupgtnan_mvml __builtin_ve_vl_pvfmksupgtnan_mvml
+#define _vel_pvfmksloltnan_mvl __builtin_ve_vl_pvfmksloltnan_mvl
+#define _vel_pvfmksupltnan_mvl __builtin_ve_vl_pvfmksupltnan_mvl
+#define _vel_pvfmksloltnan_mvml __builtin_ve_vl_pvfmksloltnan_mvml
+#define _vel_pvfmksupltnan_mvml __builtin_ve_vl_pvfmksupltnan_mvml
+#define _vel_pvfmkslonenan_mvl __builtin_ve_vl_pvfmkslonenan_mvl
+#define _vel_pvfmksupnenan_mvl __builtin_ve_vl_pvfmksupnenan_mvl
+#define _vel_pvfmkslonenan_mvml __builtin_ve_vl_pvfmkslonenan_mvml
+#define _vel_pvfmksupnenan_mvml __builtin_ve_vl_pvfmksupnenan_mvml
+#define _vel_pvfmksloeqnan_mvl __builtin_ve_vl_pvfmksloeqnan_mvl
+#define _vel_pvfmksupeqnan_mvl __builtin_ve_vl_pvfmksupeqnan_mvl
+#define _vel_pvfmksloeqnan_mvml __builtin_ve_vl_pvfmksloeqnan_mvml
+#define _vel_pvfmksupeqnan_mvml __builtin_ve_vl_pvfmksupeqnan_mvml
+#define _vel_pvfmkslogenan_mvl __builtin_ve_vl_pvfmkslogenan_mvl
+#define _vel_pvfmksupgenan_mvl __builtin_ve_vl_pvfmksupgenan_mvl
+#define _vel_pvfmkslogenan_mvml __builtin_ve_vl_pvfmkslogenan_mvml
+#define _vel_pvfmksupgenan_mvml __builtin_ve_vl_pvfmksupgenan_mvml
+#define _vel_pvfmkslolenan_mvl __builtin_ve_vl_pvfmkslolenan_mvl
+#define _vel_pvfmksuplenan_mvl __builtin_ve_vl_pvfmksuplenan_mvl
+#define _vel_pvfmkslolenan_mvml __builtin_ve_vl_pvfmkslolenan_mvml
+#define _vel_pvfmksuplenan_mvml __builtin_ve_vl_pvfmksuplenan_mvml
+#define _vel_pvfmksgt_Mvl __builtin_ve_vl_pvfmksgt_Mvl
+#define _vel_pvfmksgt_MvMl __builtin_ve_vl_pvfmksgt_MvMl
+#define _vel_pvfmkslt_Mvl __builtin_ve_vl_pvfmkslt_Mvl
+#define _vel_pvfmkslt_MvMl __builtin_ve_vl_pvfmkslt_MvMl
+#define _vel_pvfmksne_Mvl __builtin_ve_vl_pvfmksne_Mvl
+#define _vel_pvfmksne_MvMl __builtin_ve_vl_pvfmksne_MvMl
+#define _vel_pvfmkseq_Mvl __builtin_ve_vl_pvfmkseq_Mvl
+#define _vel_pvfmkseq_MvMl __builtin_ve_vl_pvfmkseq_MvMl
+#define _vel_pvfmksge_Mvl __builtin_ve_vl_pvfmksge_Mvl
+#define _vel_pvfmksge_MvMl __builtin_ve_vl_pvfmksge_MvMl
+#define _vel_pvfmksle_Mvl __builtin_ve_vl_pvfmksle_Mvl
+#define _vel_pvfmksle_MvMl __builtin_ve_vl_pvfmksle_MvMl
+#define _vel_pvfmksnum_Mvl __builtin_ve_vl_pvfmksnum_Mvl
+#define _vel_pvfmksnum_MvMl __builtin_ve_vl_pvfmksnum_MvMl
+#define _vel_pvfmksnan_Mvl __builtin_ve_vl_pvfmksnan_Mvl
+#define _vel_pvfmksnan_MvMl __builtin_ve_vl_pvfmksnan_MvMl
+#define _vel_pvfmksgtnan_Mvl __builtin_ve_vl_pvfmksgtnan_Mvl
+#define _vel_pvfmksgtnan_MvMl __builtin_ve_vl_pvfmksgtnan_MvMl
+#define _vel_pvfmksltnan_Mvl __builtin_ve_vl_pvfmksltnan_Mvl
+#define _vel_pvfmksltnan_MvMl __builtin_ve_vl_pvfmksltnan_MvMl
+#define _vel_pvfmksnenan_Mvl __builtin_ve_vl_pvfmksnenan_Mvl
+#define _vel_pvfmksnenan_MvMl __builtin_ve_vl_pvfmksnenan_MvMl
+#define _vel_pvfmkseqnan_Mvl __builtin_ve_vl_pvfmkseqnan_Mvl
+#define _vel_pvfmkseqnan_MvMl __builtin_ve_vl_pvfmkseqnan_MvMl
+#define _vel_pvfmksgenan_Mvl __builtin_ve_vl_pvfmksgenan_Mvl
+#define _vel_pvfmksgenan_MvMl __builtin_ve_vl_pvfmksgenan_MvMl
+#define _vel_pvfmkslenan_Mvl __builtin_ve_vl_pvfmkslenan_Mvl
+#define _vel_pvfmkslenan_MvMl __builtin_ve_vl_pvfmkslenan_MvMl
+#define _vel_vsumwsx_vvl __builtin_ve_vl_vsumwsx_vvl
+#define _vel_vsumwsx_vvml __builtin_ve_vl_vsumwsx_vvml
+#define _vel_vsumwzx_vvl __builtin_ve_vl_vsumwzx_vvl
+#define _vel_vsumwzx_vvml __builtin_ve_vl_vsumwzx_vvml
+#define _vel_vsuml_vvl __builtin_ve_vl_vsuml_vvl
+#define _vel_vsuml_vvml __builtin_ve_vl_vsuml_vvml
+#define _vel_vfsumd_vvl __builtin_ve_vl_vfsumd_vvl
+#define _vel_vfsumd_vvml __builtin_ve_vl_vfsumd_vvml
+#define _vel_vfsums_vvl __builtin_ve_vl_vfsums_vvl
+#define _vel_vfsums_vvml __builtin_ve_vl_vfsums_vvml
+#define _vel_vrmaxswfstsx_vvl __builtin_ve_vl_vrmaxswfstsx_vvl
+#define _vel_vrmaxswfstsx_vvvl __builtin_ve_vl_vrmaxswfstsx_vvvl
+#define _vel_vrmaxswlstsx_vvl __builtin_ve_vl_vrmaxswlstsx_vvl
+#define _vel_vrmaxswlstsx_vvvl __builtin_ve_vl_vrmaxswlstsx_vvvl
+#define _vel_vrmaxswfstzx_vvl __builtin_ve_vl_vrmaxswfstzx_vvl
+#define _vel_vrmaxswfstzx_vvvl __builtin_ve_vl_vrmaxswfstzx_vvvl
+#define _vel_vrmaxswlstzx_vvl __builtin_ve_vl_vrmaxswlstzx_vvl
+#define _vel_vrmaxswlstzx_vvvl __builtin_ve_vl_vrmaxswlstzx_vvvl
+#define _vel_vrminswfstsx_vvl __builtin_ve_vl_vrminswfstsx_vvl
+#define _vel_vrminswfstsx_vvvl __builtin_ve_vl_vrminswfstsx_vvvl
+#define _vel_vrminswlstsx_vvl __builtin_ve_vl_vrminswlstsx_vvl
+#define _vel_vrminswlstsx_vvvl __builtin_ve_vl_vrminswlstsx_vvvl
+#define _vel_vrminswfstzx_vvl __builtin_ve_vl_vrminswfstzx_vvl
+#define _vel_vrminswfstzx_vvvl __builtin_ve_vl_vrminswfstzx_vvvl
+#define _vel_vrminswlstzx_vvl __builtin_ve_vl_vrminswlstzx_vvl
+#define _vel_vrminswlstzx_vvvl __builtin_ve_vl_vrminswlstzx_vvvl
+#define _vel_vrmaxslfst_vvl __builtin_ve_vl_vrmaxslfst_vvl
+#define _vel_vrmaxslfst_vvvl __builtin_ve_vl_vrmaxslfst_vvvl
+#define _vel_vrmaxsllst_vvl __builtin_ve_vl_vrmaxsllst_vvl
+#define _vel_vrmaxsllst_vvvl __builtin_ve_vl_vrmaxsllst_vvvl
+#define _vel_vrminslfst_vvl __builtin_ve_vl_vrminslfst_vvl
+#define _vel_vrminslfst_vvvl __builtin_ve_vl_vrminslfst_vvvl
+#define _vel_vrminsllst_vvl __builtin_ve_vl_vrminsllst_vvl
+#define _vel_vrminsllst_vvvl __builtin_ve_vl_vrminsllst_vvvl
+#define _vel_vfrmaxdfst_vvl __builtin_ve_vl_vfrmaxdfst_vvl
+#define _vel_vfrmaxdfst_vvvl __builtin_ve_vl_vfrmaxdfst_vvvl
+#define _vel_vfrmaxdlst_vvl __builtin_ve_vl_vfrmaxdlst_vvl
+#define _vel_vfrmaxdlst_vvvl __builtin_ve_vl_vfrmaxdlst_vvvl
+#define _vel_vfrmaxsfst_vvl __builtin_ve_vl_vfrmaxsfst_vvl
+#define _vel_vfrmaxsfst_vvvl __builtin_ve_vl_vfrmaxsfst_vvvl
+#define _vel_vfrmaxslst_vvl __builtin_ve_vl_vfrmaxslst_vvl
+#define _vel_vfrmaxslst_vvvl __builtin_ve_vl_vfrmaxslst_vvvl
+#define _vel_vfrmindfst_vvl __builtin_ve_vl_vfrmindfst_vvl
+#define _vel_vfrmindfst_vvvl __builtin_ve_vl_vfrmindfst_vvvl
+#define _vel_vfrmindlst_vvl __builtin_ve_vl_vfrmindlst_vvl
+#define _vel_vfrmindlst_vvvl __builtin_ve_vl_vfrmindlst_vvvl
+#define _vel_vfrminsfst_vvl __builtin_ve_vl_vfrminsfst_vvl
+#define _vel_vfrminsfst_vvvl __builtin_ve_vl_vfrminsfst_vvvl
+#define _vel_vfrminslst_vvl __builtin_ve_vl_vfrminslst_vvl
+#define _vel_vfrminslst_vvvl __builtin_ve_vl_vfrminslst_vvvl
+#define _vel_vrand_vvl __builtin_ve_vl_vrand_vvl
+#define _vel_vrand_vvml __builtin_ve_vl_vrand_vvml
+#define _vel_vror_vvl __builtin_ve_vl_vror_vvl
+#define _vel_vror_vvml __builtin_ve_vl_vror_vvml
+#define _vel_vrxor_vvl __builtin_ve_vl_vrxor_vvl
+#define _vel_vrxor_vvml __builtin_ve_vl_vrxor_vvml
+#define _vel_vgt_vvssl __builtin_ve_vl_vgt_vvssl
+#define _vel_vgt_vvssvl __builtin_ve_vl_vgt_vvssvl
+#define _vel_vgt_vvssml __builtin_ve_vl_vgt_vvssml
+#define _vel_vgt_vvssmvl __builtin_ve_vl_vgt_vvssmvl
+#define _vel_vgtnc_vvssl __builtin_ve_vl_vgtnc_vvssl
+#define _vel_vgtnc_vvssvl __builtin_ve_vl_vgtnc_vvssvl
+#define _vel_vgtnc_vvssml __builtin_ve_vl_vgtnc_vvssml
+#define _vel_vgtnc_vvssmvl __builtin_ve_vl_vgtnc_vvssmvl
+#define _vel_vgtu_vvssl __builtin_ve_vl_vgtu_vvssl
+#define _vel_vgtu_vvssvl __builtin_ve_vl_vgtu_vvssvl
+#define _vel_vgtu_vvssml __builtin_ve_vl_vgtu_vvssml
+#define _vel_vgtu_vvssmvl __builtin_ve_vl_vgtu_vvssmvl
+#define _vel_vgtunc_vvssl __builtin_ve_vl_vgtunc_vvssl
+#define _vel_vgtunc_vvssvl __builtin_ve_vl_vgtunc_vvssvl
+#define _vel_vgtunc_vvssml __builtin_ve_vl_vgtunc_vvssml
+#define _vel_vgtunc_vvssmvl __builtin_ve_vl_vgtunc_vvssmvl
+#define _vel_vgtlsx_vvssl __builtin_ve_vl_vgtlsx_vvssl
+#define _vel_vgtlsx_vvssvl __builtin_ve_vl_vgtlsx_vvssvl
+#define _vel_vgtlsx_vvssml __builtin_ve_vl_vgtlsx_vvssml
+#define _vel_vgtlsx_vvssmvl __builtin_ve_vl_vgtlsx_vvssmvl
+#define _vel_vgtlsxnc_vvssl __builtin_ve_vl_vgtlsxnc_vvssl
+#define _vel_vgtlsxnc_vvssvl __builtin_ve_vl_vgtlsxnc_vvssvl
+#define _vel_vgtlsxnc_vvssml __builtin_ve_vl_vgtlsxnc_vvssml
+#define _vel_vgtlsxnc_vvssmvl __builtin_ve_vl_vgtlsxnc_vvssmvl
+#define _vel_vgtlzx_vvssl __builtin_ve_vl_vgtlzx_vvssl
+#define _vel_vgtlzx_vvssvl __builtin_ve_vl_vgtlzx_vvssvl
+#define _vel_vgtlzx_vvssml __builtin_ve_vl_vgtlzx_vvssml
+#define _vel_vgtlzx_vvssmvl __builtin_ve_vl_vgtlzx_vvssmvl
+#define _vel_vgtlzxnc_vvssl __builtin_ve_vl_vgtlzxnc_vvssl
+#define _vel_vgtlzxnc_vvssvl __builtin_ve_vl_vgtlzxnc_vvssvl
+#define _vel_vgtlzxnc_vvssml __builtin_ve_vl_vgtlzxnc_vvssml
+#define _vel_vgtlzxnc_vvssmvl __builtin_ve_vl_vgtlzxnc_vvssmvl
+#define _vel_vsc_vvssl __builtin_ve_vl_vsc_vvssl
+#define _vel_vsc_vvssml __builtin_ve_vl_vsc_vvssml
+#define _vel_vscnc_vvssl __builtin_ve_vl_vscnc_vvssl
+#define _vel_vscnc_vvssml __builtin_ve_vl_vscnc_vvssml
+#define _vel_vscot_vvssl __builtin_ve_vl_vscot_vvssl
+#define _vel_vscot_vvssml __builtin_ve_vl_vscot_vvssml
+#define _vel_vscncot_vvssl __builtin_ve_vl_vscncot_vvssl
+#define _vel_vscncot_vvssml __builtin_ve_vl_vscncot_vvssml
+#define _vel_vscu_vvssl __builtin_ve_vl_vscu_vvssl
+#define _vel_vscu_vvssml __builtin_ve_vl_vscu_vvssml
+#define _vel_vscunc_vvssl __builtin_ve_vl_vscunc_vvssl
+#define _vel_vscunc_vvssml __builtin_ve_vl_vscunc_vvssml
+#define _vel_vscuot_vvssl __builtin_ve_vl_vscuot_vvssl
+#define _vel_vscuot_vvssml __builtin_ve_vl_vscuot_vvssml
+#define _vel_vscuncot_vvssl __builtin_ve_vl_vscuncot_vvssl
+#define _vel_vscuncot_vvssml __builtin_ve_vl_vscuncot_vvssml
+#define _vel_vscl_vvssl __builtin_ve_vl_vscl_vvssl
+#define _vel_vscl_vvssml __builtin_ve_vl_vscl_vvssml
+#define _vel_vsclnc_vvssl __builtin_ve_vl_vsclnc_vvssl
+#define _vel_vsclnc_vvssml __builtin_ve_vl_vsclnc_vvssml
+#define _vel_vsclot_vvssl __builtin_ve_vl_vsclot_vvssl
+#define _vel_vsclot_vvssml __builtin_ve_vl_vsclot_vvssml
+#define _vel_vsclncot_vvssl __builtin_ve_vl_vsclncot_vvssl
+#define _vel_vsclncot_vvssml __builtin_ve_vl_vsclncot_vvssml
+#define _vel_andm_mmm __builtin_ve_vl_andm_mmm
+#define _vel_andm_MMM __builtin_ve_vl_andm_MMM
+#define _vel_orm_mmm __builtin_ve_vl_orm_mmm
+#define _vel_orm_MMM __builtin_ve_vl_orm_MMM
+#define _vel_xorm_mmm __builtin_ve_vl_xorm_mmm
+#define _vel_xorm_MMM __builtin_ve_vl_xorm_MMM
+#define _vel_eqvm_mmm __builtin_ve_vl_eqvm_mmm
+#define _vel_eqvm_MMM __builtin_ve_vl_eqvm_MMM
+#define _vel_nndm_mmm __builtin_ve_vl_nndm_mmm
+#define _vel_nndm_MMM __builtin_ve_vl_nndm_MMM
+#define _vel_negm_mm __builtin_ve_vl_negm_mm
+#define _vel_negm_MM __builtin_ve_vl_negm_MM
+#define _vel_pcvm_sml __builtin_ve_vl_pcvm_sml
+#define _vel_lzvm_sml __builtin_ve_vl_lzvm_sml
+#define _vel_tovm_sml __builtin_ve_vl_tovm_sml
+#define _vel_lcr_sss __builtin_ve_vl_lcr_sss
+#define _vel_scr_sss __builtin_ve_vl_scr_sss
+#define _vel_tscr_ssss __builtin_ve_vl_tscr_ssss
+#define _vel_fidcr_sss __builtin_ve_vl_fidcr_sss
+#define _vel_fencei __builtin_ve_vl_fencei
+#define _vel_fencem_s __builtin_ve_vl_fencem_s
+#define _vel_fencec_s __builtin_ve_vl_fencec_s
+#define _vel_svob __builtin_ve_vl_svob

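Each _vel_* macro above maps one-to-one onto the matching
__builtin_ve_vl_* builtin, and the suffix spells out the operand
kinds: v = vector register, s = scalar, m/M = vector mask (M for
packed operations), with a trailing l for the explicit vector-length
operand. As a minimal usage sketch, not part of this commit, the
hypothetical function below adds two arrays of 256 uint64_t elements
using only intrinsics exercised by the tests that follow, built for
the VE target (e.g. --target=ve-unknown-linux-gnu):

  #include <velintrin.h>

  void add_u64(unsigned long *dst, const unsigned long *a,
               const unsigned long *b) {
    /* Load 256 contiguous 64-bit elements; the stride operand is in
       bytes, so 8 selects consecutive elements. VL is 256 throughout. */
    __vr va = _vel_vld_vssl(8, a, 256);
    __vr vb = _vel_vld_vssl(8, b, 256);
    /* Element-wise unsigned 64-bit add. */
    __vr vc = _vel_vaddul_vvvl(va, vb, 256);
    /* Store the 256 results back with the same 8-byte stride. */
    _vel_vst_vssl(vc, 8, dst, 256);
  }

The _vvvvl and _vsvvl variants take the previous destination vector as
an extra operand; the tests below pass vr3 in and assign the result
back to it.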
diff  --git a/clang/test/CodeGen/VE/ve-velintrin.c b/clang/test/CodeGen/VE/ve-velintrin.c
index 51f7c5d359127..44c3309086e8b 100644
--- a/clang/test/CodeGen/VE/ve-velintrin.c
+++ b/clang/test/CodeGen/VE/ve-velintrin.c
@@ -5,7 +5,10 @@
 
 #include <velintrin.h>
 
-__vr vr1;
+long v1, v2, v3;
+double vd1;
+float vf1;
+__vr vr1, vr2, vr3, vr4;
 
 void __attribute__((noinline))
 test_vld_vssl(char* p, long idx) {
@@ -230,3 +233,4357 @@ test_vldl2dzxnc_vssvl(char* p, long idx) {
   // CHECK: call <256 x double> @llvm.ve.vl.vldl2dzxnc.vssvl(i64 %{{.*}}, i8* %{{.*}}, <256 x double> %{{.*}}, i32 256)
   vr1 = _vel_vldl2dzxnc_vssvl(idx, p, vr1, 256);
 }
+
+void __attribute__((noinline))
+test_vst_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vst_vssl
+  // CHECK: call void @llvm.ve.vl.vst.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vst_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstnc_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstnc_vssl
+  // CHECK: call void @llvm.ve.vl.vstnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstnc_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstot_vssl
+  // CHECK: call void @llvm.ve.vl.vstot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstncot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstncot_vssl
+  // CHECK: call void @llvm.ve.vl.vstncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstncot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstu_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu_vssl
+  // CHECK: call void @llvm.ve.vl.vstu.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstu_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstunc_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstunc_vssl
+  // CHECK: call void @llvm.ve.vl.vstunc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstunc_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstuot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstuot_vssl
+  // CHECK: call void @llvm.ve.vl.vstuot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstuot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstuncot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstuncot_vssl
+  // CHECK: call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstuncot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstl_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl_vssl
+  // CHECK: call void @llvm.ve.vl.vstl.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstl_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstlnc_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstlnc_vssl
+  // CHECK: call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstlnc_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstlot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstlot_vssl
+  // CHECK: call void @llvm.ve.vl.vstlot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstlot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstlncot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstlncot_vssl
+  // CHECK: call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstlncot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vst2d_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2d_vssl
+  // CHECK: call void @llvm.ve.vl.vst2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vst2d_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vst2dnc_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2dnc_vssl
+  // CHECK: call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vst2dnc_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vst2dot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2dot_vssl
+  // CHECK: call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vst2dot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vst2dncot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vst2dncot_vssl
+  // CHECK: call void @llvm.ve.vl.vst2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vst2dncot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstu2d_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2d_vssl
+  // CHECK: call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstu2d_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstu2dnc_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2dnc_vssl
+  // CHECK: call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstu2dnc_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstu2dot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2dot_vssl
+  // CHECK: call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstu2dot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstu2dncot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstu2dncot_vssl
+  // CHECK: call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstu2dncot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstl2d_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2d_vssl
+  // CHECK: call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstl2d_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstl2dnc_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2dnc_vssl
+  // CHECK: call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstl2dnc_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstl2dot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2dot_vssl
+  // CHECK: call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstl2dot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_vstl2dncot_vssl(char* p, long idx) {
+  // CHECK-LABEL: @test_vstl2dncot_vssl
+  // CHECK: call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_vstl2dncot_vssl(vr1, idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_pfchv_ssl(char* p, long idx) {
+  // CHECK-LABEL: @test_pfchv_ssl
+  // CHECK: call void @llvm.ve.vl.pfchv.ssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_pfchv_ssl(idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_pfchvnc_ssl(char* p, long idx) {
+  // CHECK-LABEL: @test_pfchvnc_ssl
+  // CHECK: call void @llvm.ve.vl.pfchvnc.ssl(i64 %{{.*}}, i8* %{{.*}}, i32 256)
+  _vel_pfchvnc_ssl(idx, p, 256);
+}
+
+void __attribute__((noinline))
+test_lsv_vvss(int idx) {
+  // CHECK-LABEL: @test_lsv_vvss
+  // CHECK: call <256 x double> @llvm.ve.vl.lsv.vvss(<256 x double> %{{.*}}, i32 %{{.*}}, i64 %{{.*}})
+  vr1 = _vel_lsv_vvss(vr1, idx, v1);
+}
+
+void __attribute__((noinline))
+test_lvsl_svs(int idx) {
+  // CHECK-LABEL: @test_lvsl_svs
+  // CHECK: call i64 @llvm.ve.vl.lvsl.svs(<256 x double> %{{.*}}, i32 %{{.*}})
+  v1 = _vel_lvsl_svs(vr1, idx);
+}
+
+void __attribute__((noinline))
+test_lvsd_svs(int idx) {
+  // CHECK-LABEL: @test_lvsd_svs
+  // CHECK: call double @llvm.ve.vl.lvsd.svs(<256 x double> %{{.*}}, i32 %{{.*}})
+  vd1 = _vel_lvsd_svs(vr1, idx);
+}
+
+void __attribute__((noinline))
+test_lvss_svs(int idx) {
+  // CHECK-LABEL: @test_lvss_svs
+  // CHECK: call float @llvm.ve.vl.lvss.svs(<256 x double> %{{.*}}, i32 %{{.*}})
+  vf1 = _vel_lvss_svs(vr1, idx);
+}
+
+void __attribute__((noinline))
+test_vbrdd_vsl() {
+  // CHECK-LABEL: @test_vbrdd_vsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdd.vsl(double %{{.*}}, i32 256)
+  vr1 = _vel_vbrdd_vsl(vd1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrdd_vsvl() {
+  // CHECK-LABEL: @test_vbrdd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdd.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrdd_vsvl(vd1, vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrdl_vsl() {
+  // CHECK-LABEL: @test_vbrdl_vsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 %{{.*}}, i32 256)
+  vr1 = _vel_vbrdl_vsl(v1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrdl_vsvl() {
+  // CHECK-LABEL: @test_vbrdl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrdl_vsvl(v1, vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrds_vsl() {
+  // CHECK-LABEL: @test_vbrds_vsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrds.vsl(float %{{.*}}, i32 256)
+  vr1 = _vel_vbrds_vsl(vf1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrds_vsvl() {
+  // CHECK-LABEL: @test_vbrds_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrds.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrds_vsvl(vf1, vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrdw_vsl() {
+  // CHECK-LABEL: @test_vbrdw_vsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 %{{.*}}, i32 256)
+  vr1 = _vel_vbrdw_vsl(v1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrdw_vsvl() {
+  // CHECK-LABEL: @test_vbrdw_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrdw.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vbrdw_vsvl(v1, vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrd_vsl() {
+  // CHECK-LABEL: @test_pvbrd_vsl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrd.vsl(i64 %{{.*}}, i32 256)
+  vr1 = _vel_pvbrd_vsl(v1, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrd_vsvl() {
+  // CHECK-LABEL: @test_pvbrd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrd.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_pvbrd_vsvl(v1, vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vmv_vsvl() {
+  // CHECK-LABEL: @test_vmv_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmv.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vmv_vsvl(v1, vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vmv_vsvvl() {
+  // CHECK-LABEL: @test_vmv_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vmv_vsvvl(v1, vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddul_vvvl() {
+  // CHECK-LABEL: @test_vaddul_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddul_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddul_vvvvl() {
+  // CHECK-LABEL: @test_vaddul_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddul_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddul_vsvl() {
+  // CHECK-LABEL: @test_vaddul_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddul_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddul_vsvvl() {
+  // CHECK-LABEL: @test_vaddul_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddul_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vadduw_vvvl() {
+  // CHECK-LABEL: @test_vadduw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vadduw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vadduw_vvvvl() {
+  // CHECK-LABEL: @test_vadduw_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vadduw_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vadduw_vsvl() {
+  // CHECK-LABEL: @test_vadduw_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vadduw_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vadduw_vsvvl() {
+  // CHECK-LABEL: @test_vadduw_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vadduw_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvaddu_vvvl() {
+  // CHECK-LABEL: @test_pvaddu_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvaddu_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvaddu_vvvvl() {
+  // CHECK-LABEL: @test_pvaddu_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvaddu_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvaddu_vsvl() {
+  // CHECK-LABEL: @test_pvaddu_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvaddu_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvaddu_vsvvl() {
+  // CHECK-LABEL: @test_pvaddu_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvaddu_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswsx_vvvl() {
+  // CHECK-LABEL: @test_vaddswsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswsx_vvvvl() {
+  // CHECK-LABEL: @test_vaddswsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswsx_vsvl() {
+  // CHECK-LABEL: @test_vaddswsx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswsx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswsx_vsvvl() {
+  // CHECK-LABEL: @test_vaddswsx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswsx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswzx_vvvl() {
+  // CHECK-LABEL: @test_vaddswzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswzx_vvvvl() {
+  // CHECK-LABEL: @test_vaddswzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswzx_vsvl() {
+  // CHECK-LABEL: @test_vaddswzx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswzx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddswzx_vsvvl() {
+  // CHECK-LABEL: @test_vaddswzx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddswzx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvadds_vvvl() {
+  // CHECK-LABEL: @test_pvadds_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvadds_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvadds_vvvvl() {
+  // CHECK-LABEL: @test_pvadds_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvadds_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvadds_vsvl() {
+  // CHECK-LABEL: @test_pvadds_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvadds_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvadds_vsvvl() {
+  // CHECK-LABEL: @test_pvadds_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvadds_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddsl_vvvl() {
+  // CHECK-LABEL: @test_vaddsl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddsl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddsl_vvvvl() {
+  // CHECK-LABEL: @test_vaddsl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddsl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vaddsl_vsvl() {
+  // CHECK-LABEL: @test_vaddsl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddsl_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vaddsl_vsvvl() {
+  // CHECK-LABEL: @test_vaddsl_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vaddsl_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubul_vvvl() {
+  // CHECK-LABEL: @test_vsubul_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubul_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubul_vvvvl() {
+  // CHECK-LABEL: @test_vsubul_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubul_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubul_vsvl() {
+  // CHECK-LABEL: @test_vsubul_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubul_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubul_vsvvl() {
+  // CHECK-LABEL: @test_vsubul_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubul_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubuw_vvvl() {
+  // CHECK-LABEL: @test_vsubuw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubuw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubuw_vvvvl() {
+  // CHECK-LABEL: @test_vsubuw_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubuw_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubuw_vsvl() {
+  // CHECK-LABEL: @test_vsubuw_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubuw_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubuw_vsvvl() {
+  // CHECK-LABEL: @test_vsubuw_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubuw_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubu_vvvl() {
+  // CHECK-LABEL: @test_pvsubu_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubu_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubu_vvvvl() {
+  // CHECK-LABEL: @test_pvsubu_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubu_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubu_vsvl() {
+  // CHECK-LABEL: @test_pvsubu_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubu_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubu_vsvvl() {
+  // CHECK-LABEL: @test_pvsubu_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubu_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswsx_vvvl() {
+  // CHECK-LABEL: @test_vsubswsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswsx_vvvvl() {
+  // CHECK-LABEL: @test_vsubswsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswsx_vsvl() {
+  // CHECK-LABEL: @test_vsubswsx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswsx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswsx_vsvvl() {
+  // CHECK-LABEL: @test_vsubswsx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswsx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswzx_vvvl() {
+  // CHECK-LABEL: @test_vsubswzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswzx_vvvvl() {
+  // CHECK-LABEL: @test_vsubswzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswzx_vsvl() {
+  // CHECK-LABEL: @test_vsubswzx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswzx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubswzx_vsvvl() {
+  // CHECK-LABEL: @test_vsubswzx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubswzx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubs_vvvl() {
+  // CHECK-LABEL: @test_pvsubs_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubs_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubs_vvvvl() {
+  // CHECK-LABEL: @test_pvsubs_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubs_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubs_vsvl() {
+  // CHECK-LABEL: @test_pvsubs_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubs_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsubs_vsvvl() {
+  // CHECK-LABEL: @test_pvsubs_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsubs_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubsl_vvvl() {
+  // CHECK-LABEL: @test_vsubsl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubsl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubsl_vvvvl() {
+  // CHECK-LABEL: @test_vsubsl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubsl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsubsl_vsvl() {
+  // CHECK-LABEL: @test_vsubsl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubsl_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsubsl_vsvvl() {
+  // CHECK-LABEL: @test_vsubsl_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsubsl_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulul_vvvl() {
+  // CHECK-LABEL: @test_vmulul_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulul_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulul_vvvvl() {
+  // CHECK-LABEL: @test_vmulul_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulul_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulul_vsvl() {
+  // CHECK-LABEL: @test_vmulul_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulul_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulul_vsvvl() {
+  // CHECK-LABEL: @test_vmulul_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulul_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmuluw_vvvl() {
+  // CHECK-LABEL: @test_vmuluw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmuluw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmuluw_vvvvl() {
+  // CHECK-LABEL: @test_vmuluw_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmuluw_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmuluw_vsvl() {
+  // CHECK-LABEL: @test_vmuluw_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmuluw_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmuluw_vsvvl() {
+  // CHECK-LABEL: @test_vmuluw_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmuluw_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswsx_vvvl() {
+  // CHECK-LABEL: @test_vmulswsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswsx_vvvvl() {
+  // CHECK-LABEL: @test_vmulswsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswsx_vsvl() {
+  // CHECK-LABEL: @test_vmulswsx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswsx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswsx_vsvvl() {
+  // CHECK-LABEL: @test_vmulswsx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswsx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswzx_vvvl() {
+  // CHECK-LABEL: @test_vmulswzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswzx_vvvvl() {
+  // CHECK-LABEL: @test_vmulswzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswzx_vsvl() {
+  // CHECK-LABEL: @test_vmulswzx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswzx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulswzx_vsvvl() {
+  // CHECK-LABEL: @test_vmulswzx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulswzx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulsl_vvvl() {
+  // CHECK-LABEL: @test_vmulsl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulsl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulsl_vvvvl() {
+  // CHECK-LABEL: @test_vmulsl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulsl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulsl_vsvl() {
+  // CHECK-LABEL: @test_vmulsl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulsl_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulsl_vsvvl() {
+  // CHECK-LABEL: @test_vmulsl_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulsl_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulslw_vvvl() {
+  // CHECK-LABEL: @test_vmulslw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulslw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulslw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulslw_vvvvl() {
+  // CHECK-LABEL: @test_vmulslw_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulslw.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulslw_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmulslw_vsvl() {
+  // CHECK-LABEL: @test_vmulslw_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulslw.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulslw_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmulslw_vsvvl() {
+  // CHECK-LABEL: @test_vmulslw_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmulslw.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmulslw_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivul_vvvl() {
+  // CHECK-LABEL: @test_vdivul_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivul_vvvvl() {
+  // CHECK-LABEL: @test_vdivul_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivul_vsvl() {
+  // CHECK-LABEL: @test_vdivul_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivul_vsvvl() {
+  // CHECK-LABEL: @test_vdivul_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivuw_vvvl() {
+  // CHECK-LABEL: @test_vdivuw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivuw_vvvvl() {
+  // CHECK-LABEL: @test_vdivuw_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivuw_vsvl() {
+  // CHECK-LABEL: @test_vdivuw_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivuw_vsvvl() {
+  // CHECK-LABEL: @test_vdivuw_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivul_vvsl() {
+  // CHECK-LABEL: @test_vdivul_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivul_vvsvl() {
+  // CHECK-LABEL: @test_vdivul_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivul_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivuw_vvsl() {
+  // CHECK-LABEL: @test_vdivuw_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivuw_vvsvl() {
+  // CHECK-LABEL: @test_vdivuw_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvsvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivuw_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswsx_vvvl() {
+  // CHECK-LABEL: @test_vdivswsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswsx_vvvvl() {
+  // CHECK-LABEL: @test_vdivswsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswsx_vsvl() {
+  // CHECK-LABEL: @test_vdivswsx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswsx_vsvvl() {
+  // CHECK-LABEL: @test_vdivswsx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswzx_vvvl() {
+  // CHECK-LABEL: @test_vdivswzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswzx_vvvvl() {
+  // CHECK-LABEL: @test_vdivswzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswzx_vsvl() {
+  // CHECK-LABEL: @test_vdivswzx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswzx_vsvvl() {
+  // CHECK-LABEL: @test_vdivswzx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswsx_vvsl() {
+  // CHECK-LABEL: @test_vdivswsx_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswsx_vvsvl() {
+  // CHECK-LABEL: @test_vdivswsx_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvsvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswsx_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswzx_vvsl() {
+  // CHECK-LABEL: @test_vdivswzx_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivswzx_vvsvl() {
+  // CHECK-LABEL: @test_vdivswzx_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvsvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivswzx_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vvvl() {
+  // CHECK-LABEL: @test_vdivsl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vvvvl() {
+  // CHECK-LABEL: @test_vdivsl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vsvl() {
+  // CHECK-LABEL: @test_vdivsl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vsvvl() {
+  // CHECK-LABEL: @test_vdivsl_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vvsl() {
+  // CHECK-LABEL: @test_vdivsl_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vdivsl_vvsvl() {
+  // CHECK-LABEL: @test_vdivsl_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vdivsl_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpul_vvvl() {
+  // CHECK-LABEL: @test_vcmpul_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpul_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpul_vvvvl() {
+  // CHECK-LABEL: @test_vcmpul_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpul_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpul_vsvl() {
+  // CHECK-LABEL: @test_vcmpul_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpul_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpul_vsvvl() {
+  // CHECK-LABEL: @test_vcmpul_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpul_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpuw_vvvl() {
+  // CHECK-LABEL: @test_vcmpuw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpuw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpuw_vvvvl() {
+  // CHECK-LABEL: @test_vcmpuw_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpuw_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpuw_vsvl() {
+  // CHECK-LABEL: @test_vcmpuw_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpuw_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpuw_vsvvl() {
+  // CHECK-LABEL: @test_vcmpuw_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpuw_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmpu_vvvl() {
+  // CHECK-LABEL: @test_pvcmpu_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmpu_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmpu_vvvvl() {
+  // CHECK-LABEL: @test_pvcmpu_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmpu_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmpu_vsvl() {
+  // CHECK-LABEL: @test_pvcmpu_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmpu_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmpu_vsvvl() {
+  // CHECK-LABEL: @test_pvcmpu_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmpu_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswsx_vvvl() {
+  // CHECK-LABEL: @test_vcmpswsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswsx_vvvvl() {
+  // CHECK-LABEL: @test_vcmpswsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswsx_vsvl() {
+  // CHECK-LABEL: @test_vcmpswsx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswsx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswsx_vsvvl() {
+  // CHECK-LABEL: @test_vcmpswsx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswsx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswzx_vvvl() {
+  // CHECK-LABEL: @test_vcmpswzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswzx_vvvvl() {
+  // CHECK-LABEL: @test_vcmpswzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswzx_vsvl() {
+  // CHECK-LABEL: @test_vcmpswzx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswzx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpswzx_vsvvl() {
+  // CHECK-LABEL: @test_vcmpswzx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpswzx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmps_vvvl() {
+  // CHECK-LABEL: @test_pvcmps_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmps_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmps_vvvvl() {
+  // CHECK-LABEL: @test_pvcmps_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmps_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmps_vsvl() {
+  // CHECK-LABEL: @test_pvcmps_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmps_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcmps_vsvvl() {
+  // CHECK-LABEL: @test_pvcmps_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcmps_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpsl_vvvl() {
+  // CHECK-LABEL: @test_vcmpsl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpsl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpsl_vvvvl() {
+  // CHECK-LABEL: @test_vcmpsl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpsl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpsl_vsvl() {
+  // CHECK-LABEL: @test_vcmpsl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpsl_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcmpsl_vsvvl() {
+  // CHECK-LABEL: @test_vcmpsl_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcmpsl_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswsx_vvvl() {
+  // CHECK-LABEL: @test_vmaxswsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswsx_vvvvl() {
+  // CHECK-LABEL: @test_vmaxswsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswsx_vsvl() {
+  // CHECK-LABEL: @test_vmaxswsx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswsx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswsx_vsvvl() {
+  // CHECK-LABEL: @test_vmaxswsx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswsx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswzx_vvvl() {
+  // CHECK-LABEL: @test_vmaxswzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswzx_vvvvl() {
+  // CHECK-LABEL: @test_vmaxswzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswzx_vsvl() {
+  // CHECK-LABEL: @test_vmaxswzx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswzx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxswzx_vsvvl() {
+  // CHECK-LABEL: @test_vmaxswzx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxswzx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvmaxs_vvvl() {
+  // CHECK-LABEL: @test_pvmaxs_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmaxs_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvmaxs_vvvvl() {
+  // CHECK-LABEL: @test_pvmaxs_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmaxs_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvmaxs_vsvl() {
+  // CHECK-LABEL: @test_pvmaxs_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmaxs_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvmaxs_vsvvl() {
+  // CHECK-LABEL: @test_pvmaxs_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmaxs_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminswsx_vvvl() {
+  // CHECK-LABEL: @test_vminswsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vminswsx_vvvvl() {
+  // CHECK-LABEL: @test_vminswsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminswsx_vsvl() {
+  // CHECK-LABEL: @test_vminswsx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswsx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vminswsx_vsvvl() {
+  // CHECK-LABEL: @test_vminswsx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswsx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminswzx_vvvl() {
+  // CHECK-LABEL: @test_vminswzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vminswzx_vvvvl() {
+  // CHECK-LABEL: @test_vminswzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminswzx_vsvl() {
+  // CHECK-LABEL: @test_vminswzx_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswzx_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vminswzx_vsvvl() {
+  // CHECK-LABEL: @test_vminswzx_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vsvvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminswzx_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvmins_vvvl() {
+  // CHECK-LABEL: @test_pvmins_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmins_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvmins_vvvvl() {
+  // CHECK-LABEL: @test_pvmins_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmins_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvmins_vsvl() {
+  // CHECK-LABEL: @test_pvmins_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmins_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvmins_vsvvl() {
+  // CHECK-LABEL: @test_pvmins_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvmins_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxsl_vvvl() {
+  // CHECK-LABEL: @test_vmaxsl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxsl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxsl_vvvvl() {
+  // CHECK-LABEL: @test_vmaxsl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxsl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxsl_vsvl() {
+  // CHECK-LABEL: @test_vmaxsl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxsl_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vmaxsl_vsvvl() {
+  // CHECK-LABEL: @test_vmaxsl_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vmaxsl_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminsl_vvvl() {
+  // CHECK-LABEL: @test_vminsl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminsl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vminsl_vvvvl() {
+  // CHECK-LABEL: @test_vminsl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminsl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vminsl_vsvl() {
+  // CHECK-LABEL: @test_vminsl_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminsl_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vminsl_vsvvl() {
+  // CHECK-LABEL: @test_vminsl_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vminsl_vsvvl(v1, vr2, vr3, 256);
+}
+
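+// Bitwise logical operations: vand, vor, vxor, and veqv (equivalence, i.e.
+// XNOR). The pv* forms operate independently on the upper and lower 32-bit
+// halves of each 64-bit element.
+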
+void __attribute__((noinline))
+test_vand_vvvl() {
+  // CHECK-LABEL: @test_vand_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vand.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vand_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vand_vvvvl() {
+  // CHECK-LABEL: @test_vand_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vand.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vand_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vand_vsvl() {
+  // CHECK-LABEL: @test_vand_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vand.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vand_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vand_vsvvl() {
+  // CHECK-LABEL: @test_vand_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vand.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vand_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvand_vvvl() {
+  // CHECK-LABEL: @test_pvand_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvand.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvand_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvand_vvvvl() {
+  // CHECK-LABEL: @test_pvand_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvand.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvand_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvand_vsvl() {
+  // CHECK-LABEL: @test_pvand_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvand.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvand_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvand_vsvvl() {
+  // CHECK-LABEL: @test_pvand_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvand.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvand_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vor_vvvl() {
+  // CHECK-LABEL: @test_vor_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vor_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vor_vvvvl() {
+  // CHECK-LABEL: @test_vor_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vor.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vor_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vor_vsvl() {
+  // CHECK-LABEL: @test_vor_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vor.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vor_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vor_vsvvl() {
+  // CHECK-LABEL: @test_vor_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vor.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vor_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvor_vvvl() {
+  // CHECK-LABEL: @test_pvor_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvor_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvor_vvvvl() {
+  // CHECK-LABEL: @test_pvor_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvor.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvor_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvor_vsvl() {
+  // CHECK-LABEL: @test_pvor_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvor.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvor_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvor_vsvvl() {
+  // CHECK-LABEL: @test_pvor_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvor.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvor_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vxor_vvvl() {
+  // CHECK-LABEL: @test_vxor_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vxor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vxor_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vxor_vvvvl() {
+  // CHECK-LABEL: @test_vxor_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vxor.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vxor_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vxor_vsvl() {
+  // CHECK-LABEL: @test_vxor_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vxor.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vxor_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vxor_vsvvl() {
+  // CHECK-LABEL: @test_vxor_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vxor.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vxor_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvxor_vvvl() {
+  // CHECK-LABEL: @test_pvxor_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvxor_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvxor_vvvvl() {
+  // CHECK-LABEL: @test_pvxor_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvxor_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvxor_vsvl() {
+  // CHECK-LABEL: @test_pvxor_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvxor_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvxor_vsvvl() {
+  // CHECK-LABEL: @test_pvxor_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvxor_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_veqv_vvvl() {
+  // CHECK-LABEL: @test_veqv_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.veqv.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_veqv_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_veqv_vvvvl() {
+  // CHECK-LABEL: @test_veqv_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.veqv.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_veqv_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_veqv_vsvl() {
+  // CHECK-LABEL: @test_veqv_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.veqv.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_veqv_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_veqv_vsvvl() {
+  // CHECK-LABEL: @test_veqv_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.veqv.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_veqv_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pveqv_vvvl() {
+  // CHECK-LABEL: @test_pveqv_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pveqv_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pveqv_vvvvl() {
+  // CHECK-LABEL: @test_pveqv_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pveqv_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pveqv_vsvl() {
+  // CHECK-LABEL: @test_pveqv_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pveqv_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pveqv_vsvvl() {
+  // CHECK-LABEL: @test_pveqv_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pveqv_vsvvl(v1, vr2, vr3, 256);
+}
+
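+// Bit-manipulation operations: leading-zero count (vldz), population count
+// (vpcnt), and bit reverse (vbrv), each with packed lo/up/pair variants, plus
+// sequence generation (vseq), which fills a vector with element indices.
+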
+void __attribute__((noinline))
+test_vldz_vvl() {
+  // CHECK-LABEL: @test_vldz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vldz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vldz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vldz_vvvl() {
+  // CHECK-LABEL: @test_vldz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vldz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vldz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvldzlo_vvl() {
+  // CHECK-LABEL: @test_pvldzlo_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldzlo.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldzlo_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvldzlo_vvvl() {
+  // CHECK-LABEL: @test_pvldzlo_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldzlo.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldzlo_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvldzup_vvl() {
+  // CHECK-LABEL: @test_pvldzup_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldzup.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldzup_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvldzup_vvvl() {
+  // CHECK-LABEL: @test_pvldzup_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldzup.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldzup_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvldz_vvl() {
+  // CHECK-LABEL: @test_pvldz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvldz_vvvl() {
+  // CHECK-LABEL: @test_pvldz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvldz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvldz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vpcnt_vvl() {
+  // CHECK-LABEL: @test_vpcnt_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vpcnt.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vpcnt_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vpcnt_vvvl() {
+  // CHECK-LABEL: @test_vpcnt_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vpcnt.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vpcnt_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvpcntlo_vvl() {
+  // CHECK-LABEL: @test_pvpcntlo_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcntlo.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcntlo_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvpcntlo_vvvl() {
+  // CHECK-LABEL: @test_pvpcntlo_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcntlo.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcntlo_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvpcntup_vvl() {
+  // CHECK-LABEL: @test_pvpcntup_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcntup.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcntup_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvpcntup_vvvl() {
+  // CHECK-LABEL: @test_pvpcntup_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcntup.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcntup_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvpcnt_vvl() {
+  // CHECK-LABEL: @test_pvpcnt_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcnt.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcnt_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvpcnt_vvvl() {
+  // CHECK-LABEL: @test_pvpcnt_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvpcnt.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvpcnt_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vbrv_vvl() {
+  // CHECK-LABEL: @test_vbrv_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrv.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vbrv_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vbrv_vvvl() {
+  // CHECK-LABEL: @test_vbrv_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vbrv.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vbrv_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrvlo_vvl() {
+  // CHECK-LABEL: @test_pvbrvlo_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrvlo.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrvlo_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrvlo_vvvl() {
+  // CHECK-LABEL: @test_pvbrvlo_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrvlo.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrvlo_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrvup_vvl() {
+  // CHECK-LABEL: @test_pvbrvup_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrvup.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrvup_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrvup_vvvl() {
+  // CHECK-LABEL: @test_pvbrvup_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrvup.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrvup_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrv_vvl() {
+  // CHECK-LABEL: @test_pvbrv_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrv.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrv_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvbrv_vvvl() {
+  // CHECK-LABEL: @test_pvbrv_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvbrv.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvbrv_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vseq_vl() {
+  // CHECK-LABEL: @test_vseq_vl
+  // CHECK: call <256 x double> @llvm.ve.vl.vseq.vl(i32 256)
+  vr1 = _vel_vseq_vl(256);
+}
+
+void __attribute__((noinline))
+test_vseq_vvl() {
+  // CHECK-LABEL: @test_vseq_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vseq.vvl(<256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_vseq_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvseqlo_vl() {
+  // CHECK-LABEL: @test_pvseqlo_vl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvseqlo.vl(i32 256)
+  vr1 = _vel_pvseqlo_vl(256);
+}
+
+void __attribute__((noinline))
+test_pvseqlo_vvl() {
+  // CHECK-LABEL: @test_pvseqlo_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvseqlo.vvl(<256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_pvseqlo_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvsequp_vl() {
+  // CHECK-LABEL: @test_pvsequp_vl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsequp.vl(i32 256)
+  vr1 = _vel_pvsequp_vl(256);
+}
+
+void __attribute__((noinline))
+test_pvsequp_vvl() {
+  // CHECK-LABEL: @test_pvsequp_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsequp.vvl(<256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_pvsequp_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvseq_vl() {
+  // CHECK-LABEL: @test_pvseq_vl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvseq.vl(i32 256)
+  vr1 = _vel_pvseq_vl(256);
+}
+
+void __attribute__((noinline))
+test_pvseq_vvl() {
+  // CHECK-LABEL: @test_pvseq_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvseq.vvl(<256 x double> %{{.*}}, i32 256)
+  vr1 = _vel_pvseq_vvl(vr1, 256);
+}
+
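+// Shift operations: logical shifts (vsll, vsrl), arithmetic word shifts with
+// sign- or zero-extended results (vslawsx/vslawzx, vsrawsx/vsrawzx), 64-bit
+// arithmetic shifts (vslal, vsral), packed forms (pvsll, pvsrl, pvsla,
+// pvsra), and vsfa (shift left and add), which takes two scalar operands.
+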
+void __attribute__((noinline))
+test_vsll_vvvl() {
+  // CHECK-LABEL: @test_vsll_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsll_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsll_vvvvl() {
+  // CHECK-LABEL: @test_vsll_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsll_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsll_vvsl() {
+  // CHECK-LABEL: @test_vsll_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vsll_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsll_vvsvl() {
+  // CHECK-LABEL: @test_vsll_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsll_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsll_vvvl() {
+  // CHECK-LABEL: @test_pvsll_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsll_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsll_vvvvl() {
+  // CHECK-LABEL: @test_pvsll_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsll_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsll_vvsl() {
+  // CHECK-LABEL: @test_pvsll_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_pvsll_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsll_vvsvl() {
+  // CHECK-LABEL: @test_pvsll_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsll_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrl_vvvl() {
+  // CHECK-LABEL: @test_vsrl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsrl_vvvvl() {
+  // CHECK-LABEL: @test_vsrl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrl_vvsl() {
+  // CHECK-LABEL: @test_vsrl_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vsrl_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsrl_vvsvl() {
+  // CHECK-LABEL: @test_vsrl_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrl_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsrl_vvvl() {
+  // CHECK-LABEL: @test_pvsrl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsrl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsrl_vvvvl() {
+  // CHECK-LABEL: @test_pvsrl_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsrl_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsrl_vvsl() {
+  // CHECK-LABEL: @test_pvsrl_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_pvsrl_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsrl_vvsvl() {
+  // CHECK-LABEL: @test_pvsrl_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsrl_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslawsx_vvvl() {
+  // CHECK-LABEL: @test_vslawsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vslawsx_vvvvl() {
+  // CHECK-LABEL: @test_vslawsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslawsx_vvsl() {
+  // CHECK-LABEL: @test_vslawsx_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256)
+  vr3 = _vel_vslawsx_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vslawsx_vvsvl() {
+  // CHECK-LABEL: @test_vslawsx_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvsvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawsx_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslawzx_vvvl() {
+  // CHECK-LABEL: @test_vslawzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vslawzx_vvvvl() {
+  // CHECK-LABEL: @test_vslawzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslawzx_vvsl() {
+  // CHECK-LABEL: @test_vslawzx_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256)
+  vr3 = _vel_vslawzx_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vslawzx_vvsvl() {
+  // CHECK-LABEL: @test_vslawzx_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvsvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslawzx_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsla_vvvl() {
+  // CHECK-LABEL: @test_pvsla_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsla_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsla_vvvvl() {
+  // CHECK-LABEL: @test_pvsla_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsla_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsla_vvsl() {
+  // CHECK-LABEL: @test_pvsla_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_pvsla_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsla_vvsvl() {
+  // CHECK-LABEL: @test_pvsla_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsla_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslal_vvvl() {
+  // CHECK-LABEL: @test_vslal_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslal_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vslal_vvvvl() {
+  // CHECK-LABEL: @test_vslal_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslal_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vslal_vvsl() {
+  // CHECK-LABEL: @test_vslal_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vslal_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vslal_vvsvl() {
+  // CHECK-LABEL: @test_vslal_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vslal_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawsx_vvvl() {
+  // CHECK-LABEL: @test_vsrawsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawsx_vvvvl() {
+  // CHECK-LABEL: @test_vsrawsx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawsx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawsx_vvsl() {
+  // CHECK-LABEL: @test_vsrawsx_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256)
+  vr3 = _vel_vsrawsx_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawsx_vvsvl() {
+  // CHECK-LABEL: @test_vsrawsx_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvsvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawsx_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawzx_vvvl() {
+  // CHECK-LABEL: @test_vsrawzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawzx_vvvvl() {
+  // CHECK-LABEL: @test_vsrawzx_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawzx_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawzx_vvsl() {
+  // CHECK-LABEL: @test_vsrawzx_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256)
+  vr3 = _vel_vsrawzx_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsrawzx_vvsvl() {
+  // CHECK-LABEL: @test_vsrawzx_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvsvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsrawzx_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsra_vvvl() {
+  // CHECK-LABEL: @test_pvsra_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsra_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsra_vvvvl() {
+  // CHECK-LABEL: @test_pvsra_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsra_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvsra_vvsl() {
+  // CHECK-LABEL: @test_pvsra_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_pvsra_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_pvsra_vvsvl() {
+  // CHECK-LABEL: @test_pvsra_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvsra_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsral_vvvl() {
+  // CHECK-LABEL: @test_vsral_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsral_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vsral_vvvvl() {
+  // CHECK-LABEL: @test_vsral_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsral_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsral_vvsl() {
+  // CHECK-LABEL: @test_vsral_vvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vsral_vvsl(vr1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsral_vvsvl() {
+  // CHECK-LABEL: @test_vsral_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsral_vvsvl(vr1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsfa_vvssl() {
+  // CHECK-LABEL: @test_vsfa_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vsfa_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsfa_vvssvl() {
+  // CHECK-LABEL: @test_vsfa_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vsfa_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
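+// Floating-point arithmetic: add, subtract, multiply, divide, and square
+// root, each in double (d) and single (s) precision. The packed pvf* forms
+// take the scalar operand as an i64 holding two packed 32-bit floats.
+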
+void __attribute__((noinline))
+test_vfaddd_vvvl() {
+  // CHECK-LABEL: @test_vfaddd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfaddd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfaddd_vvvvl() {
+  // CHECK-LABEL: @test_vfaddd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfaddd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfaddd_vsvl() {
+  // CHECK-LABEL: @test_vfaddd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfaddd_vsvl(vd1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfaddd_vsvvl() {
+  // CHECK-LABEL: @test_vfaddd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfaddd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfadds_vvvl() {
+  // CHECK-LABEL: @test_vfadds_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfadds_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfadds_vvvvl() {
+  // CHECK-LABEL: @test_vfadds_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfadds_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfadds_vsvl() {
+  // CHECK-LABEL: @test_vfadds_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfadds_vsvl(vf1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfadds_vsvvl() {
+  // CHECK-LABEL: @test_vfadds_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfadds_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfadd_vvvl() {
+  // CHECK-LABEL: @test_pvfadd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfadd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfadd_vvvvl() {
+  // CHECK-LABEL: @test_pvfadd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfadd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfadd_vsvl() {
+  // CHECK-LABEL: @test_pvfadd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfadd_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfadd_vsvvl() {
+  // CHECK-LABEL: @test_pvfadd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfadd_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubd_vvvl() {
+  // CHECK-LABEL: @test_vfsubd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubd_vvvvl() {
+  // CHECK-LABEL: @test_vfsubd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubd_vsvl() {
+  // CHECK-LABEL: @test_vfsubd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubd_vsvl(vd1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubd_vsvvl() {
+  // CHECK-LABEL: @test_vfsubd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubs_vvvl() {
+  // CHECK-LABEL: @test_vfsubs_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubs_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubs_vvvvl() {
+  // CHECK-LABEL: @test_vfsubs_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubs_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubs_vsvl() {
+  // CHECK-LABEL: @test_vfsubs_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubs_vsvl(vf1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfsubs_vsvvl() {
+  // CHECK-LABEL: @test_vfsubs_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsubs_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfsub_vvvl() {
+  // CHECK-LABEL: @test_pvfsub_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfsub_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfsub_vvvvl() {
+  // CHECK-LABEL: @test_pvfsub_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfsub_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfsub_vsvl() {
+  // CHECK-LABEL: @test_pvfsub_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfsub_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfsub_vsvvl() {
+  // CHECK-LABEL: @test_pvfsub_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfsub_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuld_vvvl() {
+  // CHECK-LABEL: @test_vfmuld_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuld_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuld_vvvvl() {
+  // CHECK-LABEL: @test_vfmuld_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuld_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuld_vsvl() {
+  // CHECK-LABEL: @test_vfmuld_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuld_vsvl(vd1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuld_vsvvl() {
+  // CHECK-LABEL: @test_vfmuld_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuld_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuls_vvvl() {
+  // CHECK-LABEL: @test_vfmuls_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuls_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuls_vvvvl() {
+  // CHECK-LABEL: @test_vfmuls_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuls_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuls_vsvl() {
+  // CHECK-LABEL: @test_vfmuls_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuls_vsvl(vf1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmuls_vsvvl() {
+  // CHECK-LABEL: @test_vfmuls_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmuls_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmul_vvvl() {
+  // CHECK-LABEL: @test_pvfmul_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmul_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmul_vvvvl() {
+  // CHECK-LABEL: @test_pvfmul_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmul_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmul_vsvl() {
+  // CHECK-LABEL: @test_pvfmul_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmul_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmul_vsvvl() {
+  // CHECK-LABEL: @test_pvfmul_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmul_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivd_vvvl() {
+  // CHECK-LABEL: @test_vfdivd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivd_vvvvl() {
+  // CHECK-LABEL: @test_vfdivd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivd_vsvl() {
+  // CHECK-LABEL: @test_vfdivd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivd_vsvl(vd1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivd_vsvvl() {
+  // CHECK-LABEL: @test_vfdivd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivs_vvvl() {
+  // CHECK-LABEL: @test_vfdivs_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivs_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivs_vvvvl() {
+  // CHECK-LABEL: @test_vfdivs_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivs_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivs_vsvl() {
+  // CHECK-LABEL: @test_vfdivs_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivs_vsvl(vf1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfdivs_vsvvl() {
+  // CHECK-LABEL: @test_vfdivs_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfdivs_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfsqrtd_vvl() {
+  // CHECK-LABEL: @test_vfsqrtd_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsqrtd.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vfsqrtd_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfsqrtd_vvvl() {
+  // CHECK-LABEL: @test_vfsqrtd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsqrtd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsqrtd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfsqrts_vvl() {
+  // CHECK-LABEL: @test_vfsqrts_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsqrts.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vfsqrts_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfsqrts_vvvl() {
+  // CHECK-LABEL: @test_vfsqrts_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsqrts.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfsqrts_vvvl(vr1, vr2, 256);
+}
+
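+// Floating-point compare (vfcmp), maximum (vfmax), and minimum (vfmin), in
+// double and single precision plus packed forms.
+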
+void __attribute__((noinline))
+test_vfcmpd_vvvl() {
+  // CHECK-LABEL: @test_vfcmpd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmpd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmpd_vvvvl() {
+  // CHECK-LABEL: @test_vfcmpd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmpd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmpd_vsvl() {
+  // CHECK-LABEL: @test_vfcmpd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmpd_vsvl(vd1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmpd_vsvvl() {
+  // CHECK-LABEL: @test_vfcmpd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmpd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmps_vvvl() {
+  // CHECK-LABEL: @test_vfcmps_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmps_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmps_vvvvl() {
+  // CHECK-LABEL: @test_vfcmps_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmps_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmps_vsvl() {
+  // CHECK-LABEL: @test_vfcmps_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmps_vsvl(vf1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfcmps_vsvvl() {
+  // CHECK-LABEL: @test_vfcmps_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfcmps_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfcmp_vvvl() {
+  // CHECK-LABEL: @test_pvfcmp_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfcmp_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfcmp_vvvvl() {
+  // CHECK-LABEL: @test_pvfcmp_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfcmp_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfcmp_vsvl() {
+  // CHECK-LABEL: @test_pvfcmp_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfcmp_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfcmp_vsvvl() {
+  // CHECK-LABEL: @test_pvfcmp_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfcmp_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxd_vvvl() {
+  // CHECK-LABEL: @test_vfmaxd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxd_vvvvl() {
+  // CHECK-LABEL: @test_vfmaxd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxd_vsvl() {
+  // CHECK-LABEL: @test_vfmaxd_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxd_vsvl(vd1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxd_vsvvl() {
+  // CHECK-LABEL: @test_vfmaxd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxs_vvvl() {
+  // CHECK-LABEL: @test_vfmaxs_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxs_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxs_vvvvl() {
+  // CHECK-LABEL: @test_vfmaxs_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxs_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxs_vsvl() {
+  // CHECK-LABEL: @test_vfmaxs_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxs_vsvl(vf1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmaxs_vsvvl() {
+  // CHECK-LABEL: @test_vfmaxs_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmaxs_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmax_vvvl() {
+  // CHECK-LABEL: @test_pvfmax_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmax_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmax_vvvvl() {
+  // CHECK-LABEL: @test_pvfmax_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmax_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmax_vsvl() {
+  // CHECK-LABEL: @test_pvfmax_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmax_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmax_vsvvl() {
+  // CHECK-LABEL: @test_pvfmax_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmax_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmind_vvvl() {
+  // CHECK-LABEL: @test_vfmind_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmind_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmind_vvvvl() {
+  // CHECK-LABEL: @test_vfmind_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmind_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmind_vsvl() {
+  // CHECK-LABEL: @test_vfmind_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vsvl(double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmind_vsvl(vd1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmind_vsvvl() {
+  // CHECK-LABEL: @test_vfmind_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmind_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmins_vvvl() {
+  // CHECK-LABEL: @test_vfmins_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmins_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmins_vvvvl() {
+  // CHECK-LABEL: @test_vfmins_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmins_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmins_vsvl() {
+  // CHECK-LABEL: @test_vfmins_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vsvl(float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmins_vsvl(vf1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfmins_vsvvl() {
+  // CHECK-LABEL: @test_vfmins_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfmins_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmin_vvvl() {
+  // CHECK-LABEL: @test_pvfmin_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmin_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmin_vvvvl() {
+  // CHECK-LABEL: @test_pvfmin_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmin_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmin_vsvl() {
+  // CHECK-LABEL: @test_pvfmin_vsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vsvl(i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmin_vsvl(v1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmin_vsvvl() {
+  // CHECK-LABEL: @test_pvfmin_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvfmin_vsvvl(v1, vr2, vr3, 256);
+}
+
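+// Fused multiply-add (vfmad*, pvfmad) and multiply-subtract (vfmsb*)
+// operations; the scalar operand may appear in either multiplicand position
+// (vsvv* vs. vvsv* variants).
+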
+void __attribute__((noinline))
+test_vfmadd_vvvvl() {
+  // CHECK-LABEL: @test_vfmadd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmadd_vvvvvl() {
+  // CHECK-LABEL: @test_vfmadd_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmadd_vsvvl() {
+  // CHECK-LABEL: @test_vfmadd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmadd_vsvvvl() {
+  // CHECK-LABEL: @test_vfmadd_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vsvvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vsvvvl(vd1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmadd_vvsvl() {
+  // CHECK-LABEL: @test_vfmadd_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvsvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vvsvl(vr1, vd1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmadd_vvsvvl() {
+  // CHECK-LABEL: @test_vfmadd_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvsvvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmadd_vvsvvl(vr1, vd1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vvvvl() {
+  // CHECK-LABEL: @test_vfmads_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vvvvvl() {
+  // CHECK-LABEL: @test_vfmads_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vsvvl() {
+  // CHECK-LABEL: @test_vfmads_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vsvvvl() {
+  // CHECK-LABEL: @test_vfmads_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vsvvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vsvvvl(vf1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vvsvl() {
+  // CHECK-LABEL: @test_vfmads_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvsvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vvsvl(vr1, vf1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmads_vvsvvl() {
+  // CHECK-LABEL: @test_vfmads_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvsvvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmads_vvsvvl(vr1, vf1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vvvvl() {
+  // CHECK-LABEL: @test_pvfmad_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vvvvvl() {
+  // CHECK-LABEL: @test_pvfmad_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vsvvl() {
+  // CHECK-LABEL: @test_pvfmad_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vsvvvl() {
+  // CHECK-LABEL: @test_pvfmad_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vsvvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vsvvvl(v1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vvsvl() {
+  // CHECK-LABEL: @test_pvfmad_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vvsvl(vr1, v1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmad_vvsvvl() {
+  // CHECK-LABEL: @test_pvfmad_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvsvvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmad_vvsvvl(vr1, v1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vvvvl() {
+  // CHECK-LABEL: @test_vfmsbd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vvvvvl() {
+  // CHECK-LABEL: @test_vfmsbd_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vsvvl() {
+  // CHECK-LABEL: @test_vfmsbd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vsvvvl() {
+  // CHECK-LABEL: @test_vfmsbd_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vsvvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vsvvvl(vd1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vvsvl() {
+  // CHECK-LABEL: @test_vfmsbd_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvsvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vvsvl(vr1, vd1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbd_vvsvvl() {
+  // CHECK-LABEL: @test_vfmsbd_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvsvvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbd_vvsvvl(vr1, vd1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vvvvl() {
+  // CHECK-LABEL: @test_vfmsbs_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vvvvvl() {
+  // CHECK-LABEL: @test_vfmsbs_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vsvvl() {
+  // CHECK-LABEL: @test_vfmsbs_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vsvvvl() {
+  // CHECK-LABEL: @test_vfmsbs_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vsvvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vsvvvl(vf1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vvsvl() {
+  // CHECK-LABEL: @test_vfmsbs_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvsvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vvsvl(vr1, vf1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfmsbs_vvsvvl() {
+  // CHECK-LABEL: @test_vfmsbs_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvsvvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfmsbs_vvsvvl(vr1, vf1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vvvvl() {
+  // CHECK-LABEL: @test_pvfmsb_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vvvvvl() {
+  // CHECK-LABEL: @test_pvfmsb_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vsvvl() {
+  // CHECK-LABEL: @test_pvfmsb_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vsvvvl() {
+  // CHECK-LABEL: @test_pvfmsb_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vsvvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vsvvvl(v1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vvsvl() {
+  // CHECK-LABEL: @test_pvfmsb_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vvsvl(vr1, v1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfmsb_vvsvvl() {
+  // CHECK-LABEL: @test_pvfmsb_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvsvvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfmsb_vvsvvl(vr1, v1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vvvvl() {
+  // CHECK-LABEL: @test_vfnmadd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vvvvvl() {
+  // CHECK-LABEL: @test_vfnmadd_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vsvvl() {
+  // CHECK-LABEL: @test_vfnmadd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vsvvvl() {
+  // CHECK-LABEL: @test_vfnmadd_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vsvvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vsvvvl(vd1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vvsvl() {
+  // CHECK-LABEL: @test_vfnmadd_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvsvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vvsvl(vr1, vd1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmadd_vvsvvl() {
+  // CHECK-LABEL: @test_vfnmadd_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvsvvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmadd_vvsvvl(vr1, vd1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vvvvl() {
+  // CHECK-LABEL: @test_vfnmads_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vvvvvl() {
+  // CHECK-LABEL: @test_vfnmads_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vsvvl() {
+  // CHECK-LABEL: @test_vfnmads_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vsvvvl() {
+  // CHECK-LABEL: @test_vfnmads_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vsvvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vsvvvl(vf1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vvsvl() {
+  // CHECK-LABEL: @test_vfnmads_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvsvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vvsvl(vr1, vf1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmads_vvsvvl() {
+  // CHECK-LABEL: @test_vfnmads_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvsvvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmads_vvsvvl(vr1, vf1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vvvvl() {
+  // CHECK-LABEL: @test_pvfnmad_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vvvvvl() {
+  // CHECK-LABEL: @test_pvfnmad_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vsvvl() {
+  // CHECK-LABEL: @test_pvfnmad_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vsvvvl() {
+  // CHECK-LABEL: @test_pvfnmad_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vsvvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vsvvvl(v1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vvsvl() {
+  // CHECK-LABEL: @test_pvfnmad_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vvsvl(vr1, v1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmad_vvsvvl() {
+  // CHECK-LABEL: @test_pvfnmad_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvsvvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmad_vvsvvl(vr1, v1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vvvvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vvvvvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vsvvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vsvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vsvvl(vd1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vsvvvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vsvvvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vsvvvl(vd1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vvsvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvsvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vvsvl(vr1, vd1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbd_vvsvvl() {
+  // CHECK-LABEL: @test_vfnmsbd_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvsvvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbd_vvsvvl(vr1, vd1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vvvvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vvvvvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vsvvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vsvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vsvvl(vf1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vsvvvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vsvvvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vsvvvl(vf1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vvsvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvsvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vvsvl(vr1, vf1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vfnmsbs_vvsvvl() {
+  // CHECK-LABEL: @test_vfnmsbs_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvsvvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_vfnmsbs_vvsvvl(vr1, vf1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vvvvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vvvvl(vr1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vvvvvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vvvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vvvvvl(vr1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vsvvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vsvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vsvvl(v1, vr2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vsvvvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vsvvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vsvvvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vsvvvl(v1, vr2, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vvsvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvsvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vvsvl(vr1, v1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_pvfnmsb_vvsvvl() {
+  // CHECK-LABEL: @test_pvfnmsb_vvsvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvsvvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr4 = _vel_pvfnmsb_vvsvvl(vr1, v1, vr3, vr4, 256);
+}
+
+void __attribute__((noinline))
+test_vrcpd_vvl() {
+  // CHECK-LABEL: @test_vrcpd_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrcpd.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vrcpd_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrcpd_vvvl() {
+  // CHECK-LABEL: @test_vrcpd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrcpd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrcpd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrcps_vvl() {
+  // CHECK-LABEL: @test_vrcps_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrcps.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vrcps_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrcps_vvvl() {
+  // CHECK-LABEL: @test_vrcps_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrcps.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrcps_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvrcp_vvl() {
+  // CHECK-LABEL: @test_pvrcp_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvrcp.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvrcp_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvrcp_vvvl() {
+  // CHECK-LABEL: @test_pvrcp_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvrcp.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvrcp_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrtd_vvl() {
+  // CHECK-LABEL: @test_vrsqrtd_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrtd.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vrsqrtd_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrtd_vvvl() {
+  // CHECK-LABEL: @test_vrsqrtd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrtd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrsqrtd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrts_vvl() {
+  // CHECK-LABEL: @test_vrsqrts_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrts.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vrsqrts_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrts_vvvl() {
+  // CHECK-LABEL: @test_vrsqrts_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrts.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrsqrts_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvrsqrt_vvl() {
+  // CHECK-LABEL: @test_pvrsqrt_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvrsqrt.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvrsqrt_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvrsqrt_vvvl() {
+  // CHECK-LABEL: @test_pvrsqrt_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvrsqrt.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvrsqrt_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrtdnex_vvl() {
+  // CHECK-LABEL: @test_vrsqrtdnex_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrtdnex.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vrsqrtdnex_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrtdnex_vvvl() {
+  // CHECK-LABEL: @test_vrsqrtdnex_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrtdnex.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrsqrtdnex_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrtsnex_vvl() {
+  // CHECK-LABEL: @test_vrsqrtsnex_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrtsnex.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vrsqrtsnex_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrsqrtsnex_vvvl() {
+  // CHECK-LABEL: @test_vrsqrtsnex_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrsqrtsnex.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrsqrtsnex_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvrsqrtnex_vvl() {
+  // CHECK-LABEL: @test_pvrsqrtnex_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvrsqrtnex.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvrsqrtnex_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvrsqrtnex_vvvl() {
+  // CHECK-LABEL: @test_pvrsqrtnex_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvrsqrtnex.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvrsqrtnex_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdsx_vvl() {
+  // CHECK-LABEL: @test_vcvtwdsx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwdsx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdsx_vvvl() {
+  // CHECK-LABEL: @test_vcvtwdsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdsxrz_vvl() {
+  // CHECK-LABEL: @test_vcvtwdsxrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwdsxrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdsxrz_vvvl() {
+  // CHECK-LABEL: @test_vcvtwdsxrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdsxrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdzx_vvl() {
+  // CHECK-LABEL: @test_vcvtwdzx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwdzx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdzx_vvvl() {
+  // CHECK-LABEL: @test_vcvtwdzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdzxrz_vvl() {
+  // CHECK-LABEL: @test_vcvtwdzxrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzxrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwdzxrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwdzxrz_vvvl() {
+  // CHECK-LABEL: @test_vcvtwdzxrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzxrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwdzxrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwssx_vvl() {
+  // CHECK-LABEL: @test_vcvtwssx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwssx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwssx_vvvl() {
+  // CHECK-LABEL: @test_vcvtwssx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwssx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwssxrz_vvl() {
+  // CHECK-LABEL: @test_vcvtwssxrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssxrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwssxrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwssxrz_vvvl() {
+  // CHECK-LABEL: @test_vcvtwssxrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssxrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwssxrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszx_vvl() {
+  // CHECK-LABEL: @test_vcvtwszx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwszx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszx_vvvl() {
+  // CHECK-LABEL: @test_vcvtwszx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwszx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszxrz_vvl() {
+  // CHECK-LABEL: @test_vcvtwszxrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtwszxrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtwszxrz_vvvl() {
+  // CHECK-LABEL: @test_vcvtwszxrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtwszxrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtws_vvl() {
+  // CHECK-LABEL: @test_pvcvtws_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvcvtws_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtws_vvvl() {
+  // CHECK-LABEL: @test_pvcvtws_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtws_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtwsrz_vvl() {
+  // CHECK-LABEL: @test_pvcvtwsrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvcvtwsrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtwsrz_vvvl() {
+  // CHECK-LABEL: @test_pvcvtwsrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtwsrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtld_vvl() {
+  // CHECK-LABEL: @test_vcvtld_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtld_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtld_vvvl() {
+  // CHECK-LABEL: @test_vcvtld_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtld_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtldrz_vvl() {
+  // CHECK-LABEL: @test_vcvtldrz_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtldrz_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtldrz_vvvl() {
+  // CHECK-LABEL: @test_vcvtldrz_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtldrz_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdw_vvl() {
+  // CHECK-LABEL: @test_vcvtdw_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdw.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtdw_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdw_vvvl() {
+  // CHECK-LABEL: @test_vcvtdw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtdw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsw_vvl() {
+  // CHECK-LABEL: @test_vcvtsw_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsw.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtsw_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsw_vvvl() {
+  // CHECK-LABEL: @test_vcvtsw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtsw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtsw_vvl() {
+  // CHECK-LABEL: @test_pvcvtsw_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtsw.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_pvcvtsw_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_pvcvtsw_vvvl() {
+  // CHECK-LABEL: @test_pvcvtsw_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.pvcvtsw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_pvcvtsw_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdl_vvl() {
+  // CHECK-LABEL: @test_vcvtdl_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdl.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtdl_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtdl_vvvl() {
+  // CHECK-LABEL: @test_vcvtdl_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtdl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtdl_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtds_vvl() {
+  // CHECK-LABEL: @test_vcvtds_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtds.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtds_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtds_vvvl() {
+  // CHECK-LABEL: @test_vcvtds_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtds.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtds_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsd_vvl() {
+  // CHECK-LABEL: @test_vcvtsd_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsd.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vcvtsd_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vcvtsd_vvvl() {
+  // CHECK-LABEL: @test_vcvtsd_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vcvtsd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vcvtsd_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vshf_vvvsl() {
+  // CHECK-LABEL: @test_vshf_vvvsl
+  // CHECK: call <256 x double> @llvm.ve.vl.vshf.vvvsl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vshf_vvvsl(vr1, vr2, v1, 256);
+}
+
+void __attribute__((noinline))
+test_vshf_vvvsvl() {
+  // CHECK-LABEL: @test_vshf_vvvsvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vshf.vvvsvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vshf_vvvsvl(vr1, vr2, v1, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsumwsx_vvl() {
+  // CHECK-LABEL: @test_vsumwsx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsumwsx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vsumwsx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vsumwzx_vvl() {
+  // CHECK-LABEL: @test_vsumwzx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsumwzx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vsumwzx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vsuml_vvl() {
+  // CHECK-LABEL: @test_vsuml_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vsuml.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vsuml_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfsumd_vvl() {
+  // CHECK-LABEL: @test_vfsumd_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsumd.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vfsumd_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfsums_vvl() {
+  // CHECK-LABEL: @test_vfsums_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfsums.vvl(<256 x double> %{{.*}}, i32 256)
+  vr2 = _vel_vfsums_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswfstsx_vvl() {
+  // CHECK-LABEL: @test_vrmaxswfstsx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswfstsx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswfstsx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswfstsx_vvvl() {
+  // CHECK-LABEL: @test_vrmaxswfstsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswfstsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswfstsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswlstsx_vvl() {
+  // CHECK-LABEL: @test_vrmaxswlstsx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswlstsx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswlstsx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswlstsx_vvvl() {
+  // CHECK-LABEL: @test_vrmaxswlstsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswlstsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswlstsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswfstzx_vvl() {
+  // CHECK-LABEL: @test_vrmaxswfstzx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswfstzx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswfstzx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswfstzx_vvvl() {
+  // CHECK-LABEL: @test_vrmaxswfstzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswfstzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswfstzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswlstzx_vvl() {
+  // CHECK-LABEL: @test_vrmaxswlstzx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswlstzx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswlstzx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxswlstzx_vvvl() {
+  // CHECK-LABEL: @test_vrmaxswlstzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswlstzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxswlstzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswfstsx_vvl() {
+  // CHECK-LABEL: @test_vrminswfstsx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswfstsx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswfstsx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswfstsx_vvvl() {
+  // CHECK-LABEL: @test_vrminswfstsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswfstsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswfstsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswlstsx_vvl() {
+  // CHECK-LABEL: @test_vrminswlstsx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswlstsx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswlstsx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswlstsx_vvvl() {
+  // CHECK-LABEL: @test_vrminswlstsx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswlstsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswlstsx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswfstzx_vvl() {
+  // CHECK-LABEL: @test_vrminswfstzx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswfstzx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswfstzx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswfstzx_vvvl() {
+  // CHECK-LABEL: @test_vrminswfstzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswfstzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswfstzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswlstzx_vvl() {
+  // CHECK-LABEL: @test_vrminswlstzx_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswlstzx.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswlstzx_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrminswlstzx_vvvl() {
+  // CHECK-LABEL: @test_vrminswlstzx_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminswlstzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminswlstzx_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxslfst_vvl() {
+  // CHECK-LABEL: @test_vrmaxslfst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxslfst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxslfst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxslfst_vvvl() {
+  // CHECK-LABEL: @test_vrmaxslfst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxslfst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxslfst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxsllst_vvl() {
+  // CHECK-LABEL: @test_vrmaxsllst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxsllst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxsllst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrmaxsllst_vvvl() {
+  // CHECK-LABEL: @test_vrmaxsllst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrmaxsllst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrmaxsllst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrminslfst_vvl() {
+  // CHECK-LABEL: @test_vrminslfst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminslfst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminslfst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrminslfst_vvvl() {
+  // CHECK-LABEL: @test_vrminslfst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminslfst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminslfst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrminsllst_vvl() {
+  // CHECK-LABEL: @test_vrminsllst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminsllst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminsllst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrminsllst_vvvl() {
+  // CHECK-LABEL: @test_vrminsllst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrminsllst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrminsllst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxdfst_vvl() {
+  // CHECK-LABEL: @test_vfrmaxdfst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxdfst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxdfst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxdfst_vvvl() {
+  // CHECK-LABEL: @test_vfrmaxdfst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxdfst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxdfst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxdlst_vvl() {
+  // CHECK-LABEL: @test_vfrmaxdlst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxdlst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxdlst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxdlst_vvvl() {
+  // CHECK-LABEL: @test_vfrmaxdlst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxdlst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxdlst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxsfst_vvl() {
+  // CHECK-LABEL: @test_vfrmaxsfst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxsfst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxsfst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxsfst_vvvl() {
+  // CHECK-LABEL: @test_vfrmaxsfst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxsfst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxsfst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxslst_vvl() {
+  // CHECK-LABEL: @test_vfrmaxslst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxslst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxslst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmaxslst_vvvl() {
+  // CHECK-LABEL: @test_vfrmaxslst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmaxslst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmaxslst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmindfst_vvl() {
+  // CHECK-LABEL: @test_vfrmindfst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmindfst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmindfst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmindfst_vvvl() {
+  // CHECK-LABEL: @test_vfrmindfst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmindfst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmindfst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmindlst_vvl() {
+  // CHECK-LABEL: @test_vfrmindlst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmindlst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmindlst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrmindlst_vvvl() {
+  // CHECK-LABEL: @test_vfrmindlst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrmindlst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrmindlst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrminsfst_vvl() {
+  // CHECK-LABEL: @test_vfrminsfst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrminsfst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrminsfst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrminsfst_vvvl() {
+  // CHECK-LABEL: @test_vfrminsfst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrminsfst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrminsfst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vfrminslst_vvl() {
+  // CHECK-LABEL: @test_vfrminslst_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrminslst.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrminslst_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vfrminslst_vvvl() {
+  // CHECK-LABEL: @test_vfrminslst_vvvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vfrminslst.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vfrminslst_vvvl(vr1, vr2, 256);
+}
+
+void __attribute__((noinline))
+test_vrand_vvl() {
+  // CHECK-LABEL: @test_vrand_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrand.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrand_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vror_vvl() {
+  // CHECK-LABEL: @test_vror_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vror.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vror_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vrxor_vvl() {
+  // CHECK-LABEL: @test_vrxor_vvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vrxor.vvl(<256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vrxor_vvl(vr1, 256);
+}
+
+void __attribute__((noinline))
+test_vgt_vvssl() {
+  // CHECK-LABEL: @test_vgt_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgt_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgt_vvssvl() {
+  // CHECK-LABEL: @test_vgt_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgt.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgt_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vgtnc_vvssl() {
+  // CHECK-LABEL: @test_vgtnc_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgtnc_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgtnc_vvssvl() {
+  // CHECK-LABEL: @test_vgtnc_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtnc.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtnc_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vgtu_vvssl() {
+  // CHECK-LABEL: @test_vgtu_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgtu_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgtu_vvssvl() {
+  // CHECK-LABEL: @test_vgtu_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtu.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtu_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vgtunc_vvssl() {
+  // CHECK-LABEL: @test_vgtunc_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgtunc_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgtunc_vvssvl() {
+  // CHECK-LABEL: @test_vgtunc_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtunc.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtunc_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlsx_vvssl() {
+  // CHECK-LABEL: @test_vgtlsx_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsx_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlsx_vvssvl() {
+  // CHECK-LABEL: @test_vgtlsx_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsx.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsx_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlsxnc_vvssl() {
+  // CHECK-LABEL: @test_vgtlsxnc_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsxnc_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlsxnc_vvssvl() {
+  // CHECK-LABEL: @test_vgtlsxnc_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlsxnc.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlsxnc_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlzx_vvssl() {
+  // CHECK-LABEL: @test_vgtlzx_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzx_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlzx_vvssvl() {
+  // CHECK-LABEL: @test_vgtlzx_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzx.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzx_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlzxnc_vvssl() {
+  // CHECK-LABEL: @test_vgtlzxnc_vvssl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzxnc_vvssl(vr1, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vgtlzxnc_vvssvl() {
+  // CHECK-LABEL: @test_vgtlzxnc_vvssvl
+  // CHECK: call <256 x double> @llvm.ve.vl.vgtlzxnc.vvssvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, i32 256)
+  vr3 = _vel_vgtlzxnc_vvssvl(vr1, v1, v2, vr3, 256);
+}
+
+void __attribute__((noinline))
+test_vsc_vvssl() {
+  // CHECK-LABEL: @test_vsc_vvssl
+  // CHECK: call void @llvm.ve.vl.vsc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vsc_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscnc_vvssl() {
+  // CHECK-LABEL: @test_vscnc_vvssl
+  // CHECK: call void @llvm.ve.vl.vscnc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscnc_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscot_vvssl() {
+  // CHECK-LABEL: @test_vscot_vvssl
+  // CHECK: call void @llvm.ve.vl.vscot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscot_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscncot_vvssl() {
+  // CHECK-LABEL: @test_vscncot_vvssl
+  // CHECK: call void @llvm.ve.vl.vscncot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscncot_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscu_vvssl() {
+  // CHECK-LABEL: @test_vscu_vvssl
+  // CHECK: call void @llvm.ve.vl.vscu.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscu_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscunc_vvssl() {
+  // CHECK-LABEL: @test_vscunc_vvssl
+  // CHECK: call void @llvm.ve.vl.vscunc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscunc_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscuot_vvssl() {
+  // CHECK-LABEL: @test_vscuot_vvssl
+  // CHECK: call void @llvm.ve.vl.vscuot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscuot_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscuncot_vvssl() {
+  // CHECK-LABEL: @test_vscuncot_vvssl
+  // CHECK: call void @llvm.ve.vl.vscuncot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscuncot_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vscl_vvssl() {
+  // CHECK-LABEL: @test_vscl_vvssl
+  // CHECK: call void @llvm.ve.vl.vscl.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vscl_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsclnc_vvssl() {
+  // CHECK-LABEL: @test_vsclnc_vvssl
+  // CHECK: call void @llvm.ve.vl.vsclnc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vsclnc_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsclot_vvssl() {
+  // CHECK-LABEL: @test_vsclot_vvssl
+  // CHECK: call void @llvm.ve.vl.vsclot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vsclot_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_vsclncot_vvssl() {
+  // CHECK-LABEL: @test_vsclncot_vvssl
+  // CHECK: call void @llvm.ve.vl.vsclncot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256)
+  _vel_vsclncot_vvssl(vr1, vr2, v1, v2, 256);
+}
+
+void __attribute__((noinline))
+test_lcr_sss() {
+  // CHECK-LABEL: @test_lcr_sss
+  // CHECK: call i64 @llvm.ve.vl.lcr.sss(i64 %{{.*}}, i64 %{{.*}})
+  v3 = _vel_lcr_sss(v1, v2);
+}
+
+void __attribute__((noinline))
+test_scr_sss() {
+  // CHECK-LABEL: @test_scr_sss
+  // CHECK: call void @llvm.ve.vl.scr.sss(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+  _vel_scr_sss(v1, v2, v3);
+}
+
+void __attribute__((noinline))
+test_tscr_ssss() {
+  // CHECK-LABEL: @test_tscr_ssss
+  // CHECK: call i64 @llvm.ve.vl.tscr.ssss(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
+  v3 = _vel_tscr_ssss(v1, v2, v3);
+}
+
+void __attribute__((noinline))
+test_fidcr_sss() {
+  // CHECK-LABEL: @test_fidcr_sss
+  // CHECK: call i64 @llvm.ve.vl.fidcr.sss(i64 %{{.*}}, i32 0)
+  v2 = _vel_fidcr_sss(v1, 0);
+}
+
+void __attribute__((noinline))
+test_fencei() {
+  // CHECK-LABEL: @test_fencei
+  // CHECK: call void @llvm.ve.vl.fencei()
+  _vel_fencei();
+}
+
+void __attribute__((noinline))
+test_fencem_s() {
+  // CHECK-LABEL: @test_fencem_s
+  // CHECK: call void @llvm.ve.vl.fencem.s(i32 0)
+  _vel_fencem_s(0);
+}
+
+void __attribute__((noinline))
+test_fencec_s() {
+  // CHECK-LABEL: @test_fencec_s
+  // CHECK: call void @llvm.ve.vl.fencec.s(i32 0)
+  _vel_fencec_s(0);
+}
+
+void __attribute__((noinline))
+test_svob() {
+  // CHECK-LABEL: @test_svob
+  // CHECK: call void @llvm.ve.vl.svob()
+  _vel_svob();
+}
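
For readers wanting to try these out: the tests above drive each intrinsic in isolation at the maximum vector length of 256, but they compose in the obvious way. A minimal usage sketch (not part of this commit), assuming the velintrin.h shipped by this patch on a ve-linux target — the __vr vector type, byte-granular load/store strides, and the 256-element strip limit are taken from that header, not from this mail — might look like:

#include <velintrin.h>

/* Approximate 1.0/sqrt(x) elementwise for one strip of n doubles.
   n must not exceed the 256-element maximum vector length. */
void rsqrt_strip(double *dst, const double *src, int n) {
  __vr vx = _vel_vld_vssl(8, src, n);   /* stride is in bytes; 8 = packed doubles */
  __vr vy = _vel_vrsqrtd_vvl(vx, n);    /* per-element reciprocal sqrt approximation */
  _vel_vst_vssl(vy, 8, dst, n);         /* store the n results */
}

Longer arrays would be processed strip-by-strip in a loop, passing min(remaining, 256) as the vector length each iteration.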