[clang] d6a0560 - [Clang][RISCV] Add custom TableGen backend for riscv-vector intrinsics.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Wed Mar 10 18:44:14 PST 2021


Author: Zakk Chen
Date: 2021-03-10T18:43:43-08:00
New Revision: d6a0560bf258f95f8960f35657a454f26dda5ba3

URL: https://github.com/llvm/llvm-project/commit/d6a0560bf258f95f8960f35657a454f26dda5ba3
DIFF: https://github.com/llvm/llvm-project/commit/d6a0560bf258f95f8960f35657a454f26dda5ba3.diff

LOG: [Clang][RISCV] Add custom TableGen backend for riscv-vector intrinsics.

Demonstrate how to generate the vadd/vfadd intrinsic functions.

1. Add -gen-riscv-vector-builtins for clang builtins.
2. Add -gen-riscv-vector-builtin-codegen for clang codegen.
3. Add -gen-riscv-vector-header for riscv_vector.h. It also generates
ifdef directives with extension checking, based on D94403.
4. Add -gen-riscv-vector-generic-header for riscv_vector_generic.h, which
generates the overloaded version of the header for the generic API (see
the usage sketch after this list).
https://github.com/riscv/rvv-intrinsic-doc/blob/master/rvv-intrinsic-rfc.md#c11-generic-interface
5. Update the tblgen documentation for the RISC-V related options.
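
For reference, a minimal usage sketch (assumptions: the overloaded vadd
below mirrors the new rvv-intrinsics-generic/vadd.c test added in this
patch; the type-specific spelling vadd_vv_i32m1 is inferred from the
builtin name __builtin_rvv_vadd_vv_i32m1_vl and is not verbatim
generator output):

  #include <stddef.h>
  #include <riscv_vector_generic.h>

  vint32m1_t add_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
    // Overloaded form from riscv_vector_generic.h; for these operand
    // types it is expected to resolve to __builtin_rvv_vadd_vv_i32m1_vl,
    // i.e. the same builtin that the non-overloaded vadd_vv_i32m1 from
    // riscv_vector.h (assumed name) would call.
    return vadd(op1, op2, vl);
  }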

riscv_vector.td also defines some currently unused type transformers for
vadd, because they demonstrate how the type transformers work and we will
need them for the full intrinsic function implementation in the future.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>

Reviewed By: jrtc27, craig.topper, HsiangKai, Jim, Paul-C-Anagnostopoulos

Differential Revision: https://reviews.llvm.org/D95016

Added: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
    clang/test/Headers/riscv-vector-header.c
    clang/utils/TableGen/RISCVVEmitter.cpp

Modified: 
    clang/include/clang/Basic/BuiltinsRISCV.def
    clang/include/clang/Basic/CMakeLists.txt
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/lib/Headers/CMakeLists.txt
    clang/utils/TableGen/CMakeLists.txt
    clang/utils/TableGen/TableGen.cpp
    clang/utils/TableGen/TableGenBackends.h
    llvm/docs/CommandGuide/tblgen.rst

Removed: 
    clang/test/CodeGen/RISCV/vadd.c


################################################################################
diff  --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def
index 9fbfa9369507..e76c853787c9 100644
--- a/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -15,186 +15,5 @@
 #   define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
 #endif
 
-#if defined(TARGET_BUILTIN) && !defined(RISCVV_BUILTIN)
-#define RISCVV_BUILTIN(ID, TYPE, ATTRS) TARGET_BUILTIN(ID, TYPE, ATTRS, "experimental-v")
-#endif
-
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m1_vl, "q8Scq8Scq8Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m1_m_vl, "q8Scq8bq8Scq8Scq8Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m1_vl, "q4Ssq4Ssq4Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m1_m_vl, "q4Ssq4bq4Ssq4Ssq4Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m1_vl, "q2Siq2Siq2Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m1_m_vl, "q2Siq2bq2Siq2Siq2Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m1_vl, "q1SWiq1SWiq1SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiq1SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m2_vl, "q16Scq16Scq16Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m2_m_vl, "q16Scq16bq16Scq16Scq16Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m2_vl, "q8Ssq8Ssq8Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m2_m_vl, "q8Ssq8bq8Ssq8Ssq8Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m2_vl, "q4Siq4Siq4Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m2_m_vl, "q4Siq4bq4Siq4Siq4Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m2_vl, "q2SWiq2SWiq2SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiq2SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m4_vl, "q32Scq32Scq32Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m4_m_vl, "q32Scq32bq32Scq32Scq32Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m4_vl, "q16Ssq16Ssq16Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m4_m_vl, "q16Ssq16bq16Ssq16Ssq16Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m4_vl, "q8Siq8Siq8Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m4_m_vl, "q8Siq8bq8Siq8Siq8Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m4_vl, "q4SWiq4SWiq4SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiq4SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m8_vl, "q64Scq64Scq64Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m8_m_vl, "q64Scq64bq64Scq64Scq64Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m8_vl, "q32Ssq32Ssq32Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m8_m_vl, "q32Ssq32bq32Ssq32Ssq32Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m8_vl, "q16Siq16Siq16Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m8_m_vl, "q16Siq16bq16Siq16Siq16Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m8_vl, "q8SWiq8SWiq8SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiq8SWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf2_vl, "q4Scq4Scq4Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf2_m_vl, "q4Scq4bq4Scq4Scq4Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf2_vl, "q2Ssq2Ssq2Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf2_m_vl, "q2Ssq2bq2Ssq2Ssq2Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32mf2_vl, "q1Siq1Siq1Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32mf2_m_vl, "q1Siq1bq1Siq1Siq1Siz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf4_vl, "q2Scq2Scq2Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf4_m_vl, "q2Scq2bq2Scq2Scq2Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf4_vl, "q1Ssq1Ssq1Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf4_m_vl, "q1Ssq1bq1Ssq1Ssq1Ssz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf8_vl, "q1Scq1Scq1Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf8_m_vl, "q1Scq1bq1Scq1Scq1Scz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m1_vl, "q8Scq8ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m1_m_vl, "q8Scq8bq8Scq8ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m1_vl, "q4Ssq4SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m1_m_vl, "q4Ssq4bq4Ssq4SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m1_vl, "q2Siq2SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m1_m_vl, "q2Siq2bq2Siq2SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m1_vl, "q1SWiq1SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m2_vl, "q16Scq16ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m2_m_vl, "q16Scq16bq16Scq16ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m2_vl, "q8Ssq8SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m2_m_vl, "q8Ssq8bq8Ssq8SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m2_vl, "q4Siq4SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m2_m_vl, "q4Siq4bq4Siq4SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m2_vl, "q2SWiq2SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m4_vl, "q32Scq32ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m4_m_vl, "q32Scq32bq32Scq32ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m4_vl, "q16Ssq16SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m4_m_vl, "q16Ssq16bq16Ssq16SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m4_vl, "q8Siq8SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m4_m_vl, "q8Siq8bq8Siq8SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m4_vl, "q4SWiq4SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m8_vl, "q64Scq64ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m8_m_vl, "q64Scq64bq64Scq64ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m8_vl, "q32Ssq32SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m8_m_vl, "q32Ssq32bq32Ssq32SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m8_vl, "q16Siq16SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m8_m_vl, "q16Siq16bq16Siq16SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m8_vl, "q8SWiq8SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiSWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf2_vl, "q4Scq4ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf2_m_vl, "q4Scq4bq4Scq4ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf2_vl, "q2Ssq2SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf2_m_vl, "q2Ssq2bq2Ssq2SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32mf2_vl, "q1Siq1SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32mf2_m_vl, "q1Siq1bq1Siq1SiSiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf4_vl, "q2Scq2ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf4_m_vl, "q2Scq2bq2Scq2ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf4_vl, "q1Ssq1SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf4_m_vl, "q1Ssq1bq1Ssq1SsSsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf8_vl, "q1Scq1ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf8_m_vl, "q1Scq1bq1Scq1ScScz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m1_vl, "q8Ucq8Ucq8Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m1_m_vl, "q8Ucq8bq8Ucq8Ucq8Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m1_vl, "q4Usq4Usq4Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m1_m_vl, "q4Usq4bq4Usq4Usq4Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m1_vl, "q2Uiq2Uiq2Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m1_m_vl, "q2Uiq2bq2Uiq2Uiq2Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m1_vl, "q1UWiq1UWiq1UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiq1UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m2_vl, "q16Ucq16Ucq16Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m2_m_vl, "q16Ucq16bq16Ucq16Ucq16Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m2_vl, "q8Usq8Usq8Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m2_m_vl, "q8Usq8bq8Usq8Usq8Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m2_vl, "q4Uiq4Uiq4Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m2_m_vl, "q4Uiq4bq4Uiq4Uiq4Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m2_vl, "q2UWiq2UWiq2UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiq2UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m4_vl, "q32Ucq32Ucq32Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m4_m_vl, "q32Ucq32bq32Ucq32Ucq32Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m4_vl, "q16Usq16Usq16Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m4_m_vl, "q16Usq16bq16Usq16Usq16Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m4_vl, "q8Uiq8Uiq8Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m4_m_vl, "q8Uiq8bq8Uiq8Uiq8Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m4_vl, "q4UWiq4UWiq4UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiq4UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m8_vl, "q64Ucq64Ucq64Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m8_m_vl, "q64Ucq64bq64Ucq64Ucq64Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m8_vl, "q32Usq32Usq32Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m8_m_vl, "q32Usq32bq32Usq32Usq32Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m8_vl, "q16Uiq16Uiq16Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m8_m_vl, "q16Uiq16bq16Uiq16Uiq16Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m8_vl, "q8UWiq8UWiq8UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiq8UWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf2_vl, "q4Ucq4Ucq4Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf2_m_vl, "q4Ucq4bq4Ucq4Ucq4Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf2_vl, "q2Usq2Usq2Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf2_m_vl, "q2Usq2bq2Usq2Usq2Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32mf2_vl, "q1Uiq1Uiq1Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32mf2_m_vl, "q1Uiq1bq1Uiq1Uiq1Uiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf4_vl, "q2Ucq2Ucq2Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf4_m_vl, "q2Ucq2bq2Ucq2Ucq2Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf4_vl, "q1Usq1Usq1Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf4_m_vl, "q1Usq1bq1Usq1Usq1Usz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf8_vl, "q1Ucq1Ucq1Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf8_m_vl, "q1Ucq1bq1Ucq1Ucq1Ucz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m1_vl, "q8Ucq8UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m1_m_vl, "q8Ucq8bq8Ucq8UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m1_vl, "q4Usq4UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m1_m_vl, "q4Usq4bq4Usq4UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m1_vl, "q2Uiq2UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m1_m_vl, "q2Uiq2bq2Uiq2UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m1_vl, "q1UWiq1UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m2_vl, "q16Ucq16UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m2_m_vl, "q16Ucq16bq16Ucq16UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m2_vl, "q8Usq8UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m2_m_vl, "q8Usq8bq8Usq8UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m2_vl, "q4Uiq4UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m2_m_vl, "q4Uiq4bq4Uiq4UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m2_vl, "q2UWiq2UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m4_vl, "q32Ucq32UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m4_m_vl, "q32Ucq32bq32Ucq32UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m4_vl, "q16Usq16UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m4_m_vl, "q16Usq16bq16Usq16UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m4_vl, "q8Uiq8UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m4_m_vl, "q8Uiq8bq8Uiq8UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m4_vl, "q4UWiq4UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m8_vl, "q64Ucq64UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m8_m_vl, "q64Ucq64bq64Ucq64UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m8_vl, "q32Usq32UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m8_m_vl, "q32Usq32bq32Usq32UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m8_vl, "q16Uiq16UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m8_m_vl, "q16Uiq16bq16Uiq16UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m8_vl, "q8UWiq8UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiUWiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf2_vl, "q4Ucq4UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf2_m_vl, "q4Ucq4bq4Ucq4UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf2_vl, "q2Usq2UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf2_m_vl, "q2Usq2bq2Usq2UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32mf2_vl, "q1Uiq1UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32mf2_m_vl, "q1Uiq1bq1Uiq1UiUiz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf4_vl, "q2Ucq2UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf4_m_vl, "q2Ucq2bq2Ucq2UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf4_vl, "q1Usq1UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf4_m_vl, "q1Usq1bq1Usq1UsUsz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf8_vl, "q1Ucq1UcUcz", "n")
-RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf8_m_vl, "q1Ucq1bq1Ucq1UcUcz", "n")
+#include "clang/Basic/riscv_vector_builtins.inc"
 
-#undef BUILTIN
-#undef RISCVV_BUILTIN

diff  --git a/clang/include/clang/Basic/CMakeLists.txt b/clang/include/clang/Basic/CMakeLists.txt
index 47a3198a0e91..8cd891385a48 100644
--- a/clang/include/clang/Basic/CMakeLists.txt
+++ b/clang/include/clang/Basic/CMakeLists.txt
@@ -84,3 +84,9 @@ clang_tablegen(arm_cde_builtin_sema.inc -gen-arm-cde-builtin-sema
 clang_tablegen(arm_cde_builtin_aliases.inc -gen-arm-cde-builtin-aliases
   SOURCE arm_cde.td
   TARGET ClangARMCdeBuiltinAliases)
+clang_tablegen(riscv_vector_builtins.inc -gen-riscv-vector-builtins
+  SOURCE riscv_vector.td
+  TARGET ClangRISCVVectorBuiltins)
+clang_tablegen(riscv_vector_builtin_cg.inc -gen-riscv-vector-builtin-codegen
+  SOURCE riscv_vector.td
+  TARGET ClangRISCVVectorBuiltinCG)

diff  --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
new file mode 100644
index 000000000000..c69b5be1798c
--- /dev/null
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -0,0 +1,210 @@
+//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===//
+//
+//  Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+//  See https://llvm.org/LICENSE.txt for license information.
+//  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtins for RISC-V V-extension. See:
+//
+//     https://github.com/riscv/rvv-intrinsic-doc
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Each record of the class RVVBuiltin defines a collection of builtins (i.e.
+// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
+// "vadd_vv_i32m2", etc).
+//
+// The elements of this collection are defined by an instantiation process the
+// range of which is specified by the cross product of the LMUL attribute and
+// every element in the attribute TypeRange. By default builtins have LMUL = [1,
+// 2, 4, 8, 1/2, 1/4, 1/8] so the process is repeated 7 times. In tablegen we
+// use the Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL.
+//
+// LMUL represents the fact that the types of values used by that builtin are
+// values generated by instructions that are executed under that LMUL. However,
+// this does not mean the builtin is necessarily lowered into an instruction
+// that executes under the specified LMUL. An example where this happens are
+// loads and stores of masks. A mask like `vbool8_t` can be generated, for
+// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two
+// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will
+// be performed under LMUL=1 because mask registers are not grouped.
+//
+// TypeRange is a non-empty sequence of basic types:
+//
+//   c: int8_t (i8)
+//   s: int16_t (i16)
+//   i: int32_t (i32)
+//   l: int64_t (i64)
+//   h: float16_t (half)
+//   f: float32_t (float)
+//   d: float64_t (double)
+//
+// This way, given an LMUL, a record with a TypeRange "sil" will cause the
+// definition of 3 builtins. Each type "t" in the TypeRange (in this example
+// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
+// definition of that particular builtin (for the given LMUL).
+//
+// During the instantiation, types can be transformed or modified using type
+// transformers. Given a type "t" the following primitive type transformers can
+// be applied to it to yield another type.
+//
+//   e: type of "t" as is (identity)
+//   v: computes a vector type whose element type is "t" for the current LMUL
+//   w: computes a vector type identical to what 'v' computes except for the
+//      element type which is twice as wide as the element type of 'v'
+//   q: computes a vector type identical to what 'v' computes except for the
+//      element type which is four times as wide as the element type of 'v'
+//   o: computes a vector type identical to what 'v' computes except for the
+//      element type which is eight times as wide as the element type of 'v'
+//   m: computes a vector type identical to what 'v' computes except for the
+//      element type which is bool
+//   0: void type, ignores "t"
+//   z: size_t, ignores "t"
+//   t: ptrdiff_t, ignores "t"
+//   c: uint8_t, ignores "t"
+//
+// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
+// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
+// Accordingly "w" would yield __rvv_int64m2_t.
+//
+// A type transformer can be prefixed by other non-primitive type transformers.
+//
+//   P: constructs a pointer to the current type
+//   C: adds const to the type
+//   K: requires the integer type to be a constant expression
+//   U: given an integer type or vector type, computes its unsigned variant
+//   I: given a vector type, compute the vector type with integer type
+//      elements of the same width
+//   F: given a vector type, compute the vector type with floating-point type
+//      elements of the same width
+//   S: given a vector type, computes its equivalent one for LMUL=1. This is a
+//      no-op if the vector was already LMUL=1
+//
+// Following the example above, if t is "i", then "Ue" will yield unsigned
+// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would
+// yield __rvv_float64m2_t, etc.
+//
+// Each builtin is then defined by applying each type in TypeRange against the
+// sequence of type transformers described in Suffix and Prototype.
+//
+// The name of the builtin is defined by the Name attribute (which defaults to
+// the name of the class) followed, separated by an underscore, by the Suffix
+// attribute. For instance with Name="foo", Suffix = "v" and TypeRange = "il",
+// the builtin generated will be __builtin_rvv_foo_i32m1 and
+// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
+// type transformer (say "vv") each of the types is separated with an
+// underscore as in "__builtin_rvv_foo_i32m1_i32m1".
+//
+// The C/C++ prototype of the builtin is defined by the Prototype attribute.
+// Prototype is a non-empty sequence of type transformers, the first of which
+// is the return type of the builtin and the rest are the parameters of the
+// builtin, in order. For instance if Prototype is "wvv" and TypeRange is "si"
+// a first builtin will have type
+// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
+// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
+// under LMUL=1).
+//
+// There are a number of attributes that are used to constrain the number and
+// shape of the builtins generated. Refer to the comments below for them.
+class RVVBuiltin<string suffix, string prototype, string type_range,
+                 string managed_suffix = ""> {
+  // Base name that will be prefixed with __builtin_rvv_ and suffixed with the
+  // computed Suffix.
+  string Name = NAME;
+
+  // If not empty, each instantiated builtin will have this appended after an
+  // underscore (_). It is instantiated like Prototype.
+  string Suffix = suffix;
+
+  // If empty, the default MangledName is the substring of `Name` that ends at
+  // the first '_'. For example, the default mangled name is `vadd` for Name
+  // `vadd_vv`. It is used to describe some special naming cases.
+  string MangledName = "";
+
+  // The different variants of the builtin, parameterised with a type.
+  string TypeRange = type_range;
+
+  // We use each type described in TypeRange and LMUL with prototype to
+  // instantiate a specific element of the set of builtins being defined.
+  // Prototype attribute defines the C/C++ prototype of the builtin. It is a
+  // non-empty sequence of type transformers, the first of which is the return
+  // type of the builtin and the rest are the parameters of the builtin, in
+  // order. For instance if Prototype is "wvv", TypeRange is "si" and LMUL=1, a
+  // first builtin will have type
+  // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
+  // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
+  string Prototype = prototype;
+
+  // This builtin has a masked form.
+  bit HasMask = true;
+
+  // If HasMask, this flag states that this builtin has a maskedoff operand. It
+  // is always the first operand in both the builtin and the IR intrinsic.
+  bit HasMaskedOffOperand = true;
+
+  // This builtin has a granted vector length parameter in the last position.
+  bit HasVL = true;
+
+  // This builtin supports function overloading and has a mangled name.
+  bit HasGeneric = true;
+
+  // Reads or writes "memory" or has other side-effects.
+  bit HasSideEffects = false;
+
+  // This builtin is valid for the given Log2LMULs.
+  list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
+
+  // Emit the automatic clang codegen. It describes what types we have to use
+  // to obtain the specific LLVM intrinsic. -1 means the return type; otherwise
+  // k >= 0 refers to the k-th operand (counting from zero) of the unmasked
+  // version of the builtin. k can't be the mask operand's position.
+  list<int> IntrinsicTypes = [];
+
+  // If this name is not empty, it is the ID of the LLVM intrinsic we want to
+  // lower to.
+  string IRName = NAME;
+
+  // If HasMask, this is the ID of the LLVM intrinsic we want to lower to.
+  string IRNameMask = NAME #"_mask";
+}
+
+//===----------------------------------------------------------------------===//
+// Basic classes with automatic codegen.
+//===----------------------------------------------------------------------===//
+
+class RVVBinBuiltin<string suffix, string prototype, string type_range>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let IntrinsicTypes = [-1, 1];
+}
+
+multiclass RVVBinBuiltinSet<string intrinsic_name, string type_range,
+                            list<list<string>> suffixes_prototypes> {
+  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+    foreach s_p = suffixes_prototypes in {
+      let Name = NAME # "_" # s_p[0] in {
+        defvar suffix = s_p[1];
+        defvar prototype = s_p[2];
+        def : RVVBinBuiltin<suffix, prototype, type_range>;
+      }
+    }
+  }
+}
+
+// 12. Vector Integer Arithmetic Instructions
+// 12.1. Vector Single-Width Integer Add and Subtract
+defm vadd : RVVBinBuiltinSet<"vadd", "csil",
+                             [["vv", "v", "vvv"],
+                              ["vx", "v", "vve"],
+                              ["vv", "Uv", "UvUvUv"],
+                              ["vx", "Uv", "UvUvUe"]]>;
+
+// 14. Vector Floating-Point Instructions
+// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+defm vfadd : RVVBinBuiltinSet<"vfadd", "fd",
+                              [["vv", "v", "vvv"],
+                               ["vf", "v", "vve"]]>;

diff  --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index d24326162290..ac10fabaea39 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -17820,196 +17820,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
   // Required for overloaded intrinsics.
   llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
   switch (BuiltinID) {
-  // We could generate all the possible combinations and handling code in
-  // a file and include it here, instead of listing all the builtins plainly.
-  // Something like
-  // #include clang/Basic/RISCVVBuiltinCodeGen.inc
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8mf8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8mf8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8mf8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m1_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m8_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32mf2_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16mf4_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8mf8_vl:
-    // The order of operands is (op1, op2, vl).
-    ID = Intrinsic::riscv_vadd;
-    IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
-    break;
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i64m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i32mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i16mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_i8mf8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i64m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i32mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i16mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_i8mf8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u64m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u32mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u16mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vv_u8mf8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m1_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u64m8_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u32mf2_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u16mf4_m_vl:
-  case RISCV::BI__builtin_rvv_vadd_vx_u8mf8_m_vl:
-    ID = Intrinsic::riscv_vadd_mask;
-    // The order of operands is (mask, maskedoff, op1, op2, vl).
-    IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[4]->getType()};
-    // The order of intrinsic operands is (maskedoff, op1, op2, mask, vl).
-    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-    break;
+#include "clang/Basic/riscv_vector_builtin_cg.inc"
   }
 
   assert(ID != Intrinsic::not_intrinsic);

diff  --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index b2c0ce8dd4a0..e7918f3cab92 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -209,6 +209,10 @@ clang_generate_header(-gen-arm-bf16 arm_bf16.td arm_bf16.h)
 clang_generate_header(-gen-arm-mve-header arm_mve.td arm_mve.h)
 # Generate arm_cde.h
 clang_generate_header(-gen-arm-cde-header arm_cde.td arm_cde.h)
+# Generate riscv_vector.h
+clang_generate_header(-gen-riscv-vector-header riscv_vector.td riscv_vector.h)
+# Generate riscv_vector_generic.h
+clang_generate_header(-gen-riscv-vector-generic-header riscv_vector.td riscv_vector_generic.h)
 
 add_custom_target(clang-resource-headers ALL DEPENDS ${out_files})
 set_target_properties(clang-resource-headers PROPERTIES

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c
new file mode 100644
index 000000000000..33073587e74c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c
@@ -0,0 +1,2476 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector_generic.h>
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vadd_m(mask, maskedoff, op1, op2, vl);
+}
+

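For context: every masked test above goes through the single overloaded vadd_m entry point from riscv_vector_generic.h, so one name covers each element width and LMUL, with the mask, maskedoff, operands, and vl passed in the order shown in the CHECK lines. A minimal caller sketch under the same target features as the RUN lines; the helper name masked_add_scalar is hypothetical and only illustrates the call shape:

#include <riscv_vector_generic.h>  // provides the vector types and the overloaded vadd_m,
                                   // as the tests above demonstrate

// Masked vector-scalar add: inactive elements are taken from acc (the
// maskedoff operand), matching test_vadd_vx_u32m1_m above.
vuint32m1_t masked_add_scalar(vbool32_t mask, vuint32m1_t acc,
                              vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vadd_m(mask, acc, op1, op2, vl);
}
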
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
new file mode 100644
index 000000000000..d0389a333686
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
@@ -0,0 +1,516 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector_generic.h>
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+  return vfadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return vfadd_m(mask, maskedoff, op1, op2, vl);
+}
+

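The vfadd tests above likewise rely on overload resolution (vfadd, vfadd_m) from riscv_vector_generic.h. The rvv-intrinsics/vadd.c test that follows uses the non-overloaded riscv_vector.h instead, where each intrinsic name encodes the operand form, element type, and LMUL, e.g. vadd_vv_i8m1 versus vadd_vx_i8m1. A small sketch of that spelling, under the same assumptions as the RUN lines; the helper names are hypothetical:

#include <riscv_vector.h>  // type-suffixed names such as vadd_vv_i8m1, vadd_vx_i8m1

// vv form: vector + vector; the suffix spells out element type and LMUL.
vint8m1_t add_vectors(vint8m1_t a, vint8m1_t b, size_t vl) {
  return vadd_vv_i8m1(a, b, vl);
}

// vx form: vector + scalar of the same element type.
vint8m1_t add_scalar(vint8m1_t a, int8_t s, size_t vl) {
  return vadd_vx_i8m1(a, s, vl);
}
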
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
new file mode 100644
index 000000000000..d3d73e8d7bba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -0,0 +1,2476 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vadd_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vadd_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vadd_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vadd_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vadd_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vadd_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vadd_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vadd_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vadd_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vadd_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vadd_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vadd_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vadd_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vadd_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vadd_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vadd_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vadd_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vadd_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vadd_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vadd_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vadd_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vadd_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vadd_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vadd_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vadd_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vadd_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vadd_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vadd_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vadd_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vadd_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vadd_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vadd_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vadd_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vadd_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vadd_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vadd_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vadd_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vadd_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vadd_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vadd_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vadd_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vadd_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vadd_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vadd_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vadd_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vadd_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vadd_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vadd_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vadd_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vadd_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vadd_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vadd_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vadd_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vadd_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vadd_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vadd_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vadd_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vadd_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vadd_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vadd_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vadd_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vadd_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vadd_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vadd_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vadd_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vadd_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vadd_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vadd_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vadd_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
new file mode 100644
index 000000000000..2c8f0e54afcf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -0,0 +1,516 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vfadd_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return vfadd_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return vfadd_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return vfadd_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return vfadd_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return vfadd_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return vfadd_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return vfadd_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return vfadd_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vfadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+

diff  --git a/clang/test/CodeGen/RISCV/vadd.c b/clang/test/CodeGen/RISCV/vadd.c
deleted file mode 100644
index cc2e52f725a6..000000000000
--- a/clang/test/CodeGen/RISCV/vadd.c
+++ /dev/null
@@ -1,2648 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
-// RUN:   -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV64-O2 %s
-// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v \
-// RUN:   -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV32-O2 %s
-
-#include <stddef.h>
-#include <stdint.h>
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vv_i8m1_vl(__rvv_int8m1_t arg_0, __rvv_int8m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i8m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vv_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, __rvv_int8m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vv_i16m1_vl(__rvv_int16m1_t arg_0, __rvv_int16m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i16m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vv_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, __rvv_int16m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_int32m1_t test_vadd_vv_i32m1_vl(__rvv_int32m1_t arg_0, __rvv_int32m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i32m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_int32m1_t test_vadd_vv_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, __rvv_int32m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_int64m1_t test_vadd_vv_i64m1_vl(__rvv_int64m1_t arg_0, __rvv_int64m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i64m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_int64m1_t test_vadd_vv_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, __rvv_int64m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_int8m2_t test_vadd_vv_i8m2_vl(__rvv_int8m2_t arg_0, __rvv_int8m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i8m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_int8m2_t test_vadd_vv_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, __rvv_int8m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_int16m2_t test_vadd_vv_i16m2_vl(__rvv_int16m2_t arg_0, __rvv_int16m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i16m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_int16m2_t test_vadd_vv_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, __rvv_int16m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_int32m2_t test_vadd_vv_i32m2_vl(__rvv_int32m2_t arg_0, __rvv_int32m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i32m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_int32m2_t test_vadd_vv_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, __rvv_int32m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_int64m2_t test_vadd_vv_i64m2_vl(__rvv_int64m2_t arg_0, __rvv_int64m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i64m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_int64m2_t test_vadd_vv_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, __rvv_int64m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_int8m4_t test_vadd_vv_i8m4_vl(__rvv_int8m4_t arg_0, __rvv_int8m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i8m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_int8m4_t test_vadd_vv_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, __rvv_int8m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_int16m4_t test_vadd_vv_i16m4_vl(__rvv_int16m4_t arg_0, __rvv_int16m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i16m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_int16m4_t test_vadd_vv_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, __rvv_int16m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_int32m4_t test_vadd_vv_i32m4_vl(__rvv_int32m4_t arg_0, __rvv_int32m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i32m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_int32m4_t test_vadd_vv_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, __rvv_int32m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_int64m4_t test_vadd_vv_i64m4_vl(__rvv_int64m4_t arg_0, __rvv_int64m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i64m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_int64m4_t test_vadd_vv_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, __rvv_int64m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_int8m8_t test_vadd_vv_i8m8_vl(__rvv_int8m8_t arg_0, __rvv_int8m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i8m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_int8m8_t test_vadd_vv_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, __rvv_int8m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_int16m8_t test_vadd_vv_i16m8_vl(__rvv_int16m8_t arg_0, __rvv_int16m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i16m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_int16m8_t test_vadd_vv_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, __rvv_int16m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_int32m8_t test_vadd_vv_i32m8_vl(__rvv_int32m8_t arg_0, __rvv_int32m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i32m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_int32m8_t test_vadd_vv_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, __rvv_int32m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_int64m8_t test_vadd_vv_i64m8_vl(__rvv_int64m8_t arg_0, __rvv_int64m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i64m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_int64m8_t test_vadd_vv_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, __rvv_int64m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_int8mf2_t test_vadd_vv_i8mf2_vl(__rvv_int8mf2_t arg_0, __rvv_int8mf2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i8mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_int8mf2_t test_vadd_vv_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, __rvv_int8mf2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_int16mf2_t test_vadd_vv_i16mf2_vl(__rvv_int16mf2_t arg_0, __rvv_int16mf2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i16mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_int16mf2_t test_vadd_vv_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, __rvv_int16mf2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_int32mf2_t test_vadd_vv_i32mf2_vl(__rvv_int32mf2_t arg_0, __rvv_int32mf2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i32mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_int32mf2_t test_vadd_vv_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, __rvv_int32mf2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_int8mf4_t test_vadd_vv_i8mf4_vl(__rvv_int8mf4_t arg_0, __rvv_int8mf4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i8mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_int8mf4_t test_vadd_vv_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, __rvv_int8mf4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_int16mf4_t test_vadd_vv_i16mf4_vl(__rvv_int16mf4_t arg_0, __rvv_int16mf4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i16mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_int16mf4_t test_vadd_vv_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, __rvv_int16mf4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_int8mf8_t test_vadd_vv_i8mf8_vl(__rvv_int8mf8_t arg_0, __rvv_int8mf8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_i8mf8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_int8mf8_t test_vadd_vv_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, __rvv_int8mf8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vx_i8m1_vl(__rvv_int8m1_t arg_0, int8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i8m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_int8m1_t test_vadd_vx_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, int8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vx_i16m1_vl(__rvv_int16m1_t arg_0, int16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i16m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_int16m1_t test_vadd_vx_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, int16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_int32m1_t test_vadd_vx_i32m1_vl(__rvv_int32m1_t arg_0, int32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i32m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_int32m1_t test_vadd_vx_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, int32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_int64m1_t test_vadd_vx_i64m1_vl(__rvv_int64m1_t arg_0, int64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i64m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_int64m1_t test_vadd_vx_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, int64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_int8m2_t test_vadd_vx_i8m2_vl(__rvv_int8m2_t arg_0, int8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i8m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_int8m2_t test_vadd_vx_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, int8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_int16m2_t test_vadd_vx_i16m2_vl(__rvv_int16m2_t arg_0, int16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i16m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_int16m2_t test_vadd_vx_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, int16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_int32m2_t test_vadd_vx_i32m2_vl(__rvv_int32m2_t arg_0, int32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i32m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_int32m2_t test_vadd_vx_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, int32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_int64m2_t test_vadd_vx_i64m2_vl(__rvv_int64m2_t arg_0, int64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i64m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_int64m2_t test_vadd_vx_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, int64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_int8m4_t test_vadd_vx_i8m4_vl(__rvv_int8m4_t arg_0, int8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i8m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_int8m4_t test_vadd_vx_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, int8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_int16m4_t test_vadd_vx_i16m4_vl(__rvv_int16m4_t arg_0, int16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i16m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_int16m4_t test_vadd_vx_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, int16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_int32m4_t test_vadd_vx_i32m4_vl(__rvv_int32m4_t arg_0, int32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i32m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_int32m4_t test_vadd_vx_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, int32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_int64m4_t test_vadd_vx_i64m4_vl(__rvv_int64m4_t arg_0, int64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i64m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_int64m4_t test_vadd_vx_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, int64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_int8m8_t test_vadd_vx_i8m8_vl(__rvv_int8m8_t arg_0, int8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i8m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_int8m8_t test_vadd_vx_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, int8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_int16m8_t test_vadd_vx_i16m8_vl(__rvv_int16m8_t arg_0, int16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i16m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_int16m8_t test_vadd_vx_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, int16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_int32m8_t test_vadd_vx_i32m8_vl(__rvv_int32m8_t arg_0, int32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i32m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_int32m8_t test_vadd_vx_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, int32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_int64m8_t test_vadd_vx_i64m8_vl(__rvv_int64m8_t arg_0, int64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i64m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_int64m8_t test_vadd_vx_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, int64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_int8mf2_t test_vadd_vx_i8mf2_vl(__rvv_int8mf2_t arg_0, int8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i8mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_int8mf2_t test_vadd_vx_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, int8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_int16mf2_t test_vadd_vx_i16mf2_vl(__rvv_int16mf2_t arg_0, int16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i16mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_int16mf2_t test_vadd_vx_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, int16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_int32mf2_t test_vadd_vx_i32mf2_vl(__rvv_int32mf2_t arg_0, int32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i32mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_int32mf2_t test_vadd_vx_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, int32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_int8mf4_t test_vadd_vx_i8mf4_vl(__rvv_int8mf4_t arg_0, int8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i8mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_int8mf4_t test_vadd_vx_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, int8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_int16mf4_t test_vadd_vx_i16mf4_vl(__rvv_int16mf4_t arg_0, int16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i16mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_int16mf4_t test_vadd_vx_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, int16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_int8mf8_t test_vadd_vx_i8mf8_vl(__rvv_int8mf8_t arg_0, int8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_i8mf8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_int8mf8_t test_vadd_vx_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, int8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_uint8m1_t test_vadd_vv_u8m1_vl(__rvv_uint8m1_t arg_0, __rvv_uint8m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u8m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_uint8m1_t test_vadd_vv_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, __rvv_uint8m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_uint16m1_t test_vadd_vv_u16m1_vl(__rvv_uint16m1_t arg_0, __rvv_uint16m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u16m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_uint16m1_t test_vadd_vv_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, __rvv_uint16m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_uint32m1_t test_vadd_vv_u32m1_vl(__rvv_uint32m1_t arg_0, __rvv_uint32m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u32m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_uint32m1_t test_vadd_vv_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, __rvv_uint32m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_uint64m1_t test_vadd_vv_u64m1_vl(__rvv_uint64m1_t arg_0, __rvv_uint64m1_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u64m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_uint64m1_t test_vadd_vv_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, __rvv_uint64m1_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_uint8m2_t test_vadd_vv_u8m2_vl(__rvv_uint8m2_t arg_0, __rvv_uint8m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u8m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_uint8m2_t test_vadd_vv_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, __rvv_uint8m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_uint16m2_t test_vadd_vv_u16m2_vl(__rvv_uint16m2_t arg_0, __rvv_uint16m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u16m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_uint16m2_t test_vadd_vv_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, __rvv_uint16m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_uint32m2_t test_vadd_vv_u32m2_vl(__rvv_uint32m2_t arg_0, __rvv_uint32m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u32m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_uint32m2_t test_vadd_vv_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, __rvv_uint32m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_uint64m2_t test_vadd_vv_u64m2_vl(__rvv_uint64m2_t arg_0, __rvv_uint64m2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u64m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_uint64m2_t test_vadd_vv_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, __rvv_uint64m2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_uint8m4_t test_vadd_vv_u8m4_vl(__rvv_uint8m4_t arg_0, __rvv_uint8m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u8m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_uint8m4_t test_vadd_vv_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, __rvv_uint8m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_uint16m4_t test_vadd_vv_u16m4_vl(__rvv_uint16m4_t arg_0, __rvv_uint16m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u16m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_uint16m4_t test_vadd_vv_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, __rvv_uint16m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_uint32m4_t test_vadd_vv_u32m4_vl(__rvv_uint32m4_t arg_0, __rvv_uint32m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u32m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_uint32m4_t test_vadd_vv_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, __rvv_uint32m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_uint64m4_t test_vadd_vv_u64m4_vl(__rvv_uint64m4_t arg_0, __rvv_uint64m4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u64m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_uint64m4_t test_vadd_vv_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, __rvv_uint64m4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_uint8m8_t test_vadd_vv_u8m8_vl(__rvv_uint8m8_t arg_0, __rvv_uint8m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u8m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_uint8m8_t test_vadd_vv_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, __rvv_uint8m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_uint16m8_t test_vadd_vv_u16m8_vl(__rvv_uint16m8_t arg_0, __rvv_uint16m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u16m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_uint16m8_t test_vadd_vv_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, __rvv_uint16m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_uint32m8_t test_vadd_vv_u32m8_vl(__rvv_uint32m8_t arg_0, __rvv_uint32m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u32m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_uint32m8_t test_vadd_vv_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, __rvv_uint32m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_uint64m8_t test_vadd_vv_u64m8_vl(__rvv_uint64m8_t arg_0, __rvv_uint64m8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u64m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_uint64m8_t test_vadd_vv_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, __rvv_uint64m8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_uint8mf2_t test_vadd_vv_u8mf2_vl(__rvv_uint8mf2_t arg_0, __rvv_uint8mf2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u8mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_uint8mf2_t test_vadd_vv_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, __rvv_uint8mf2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_uint16mf2_t test_vadd_vv_u16mf2_vl(__rvv_uint16mf2_t arg_0, __rvv_uint16mf2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u16mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_uint16mf2_t test_vadd_vv_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, __rvv_uint16mf2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_uint32mf2_t test_vadd_vv_u32mf2_vl(__rvv_uint32mf2_t arg_0, __rvv_uint32mf2_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u32mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_uint32mf2_t test_vadd_vv_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, __rvv_uint32mf2_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_uint8mf4_t test_vadd_vv_u8mf4_vl(__rvv_uint8mf4_t arg_0, __rvv_uint8mf4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u8mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_uint8mf4_t test_vadd_vv_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, __rvv_uint8mf4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_uint16mf4_t test_vadd_vv_u16mf4_vl(__rvv_uint16mf4_t arg_0, __rvv_uint16mf4_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u16mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_uint16mf4_t test_vadd_vv_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, __rvv_uint16mf4_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_uint8mf8_t test_vadd_vv_u8mf8_vl(__rvv_uint8mf8_t arg_0, __rvv_uint8mf8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vv_u8mf8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_uint8mf8_t test_vadd_vv_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, __rvv_uint8mf8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vv_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_uint8m1_t test_vadd_vx_u8m1_vl(__rvv_uint8m1_t arg_0, uint8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u8m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-__rvv_uint8m1_t test_vadd_vx_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_uint16m1_t test_vadd_vx_u16m1_vl(__rvv_uint16m1_t arg_0, uint16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u16m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-__rvv_uint16m1_t test_vadd_vx_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, uint16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_uint32m1_t test_vadd_vx_u32m1_vl(__rvv_uint32m1_t arg_0, uint32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u32m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-__rvv_uint32m1_t test_vadd_vx_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, uint32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_uint64m1_t test_vadd_vx_u64m1_vl(__rvv_uint64m1_t arg_0, uint64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u64m1_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-__rvv_uint64m1_t test_vadd_vx_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, uint64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_uint8m2_t test_vadd_vx_u8m2_vl(__rvv_uint8m2_t arg_0, uint8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u8m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-__rvv_uint8m2_t test_vadd_vx_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_uint16m2_t test_vadd_vx_u16m2_vl(__rvv_uint16m2_t arg_0, uint16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u16m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-__rvv_uint16m2_t test_vadd_vx_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, uint16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_uint32m2_t test_vadd_vx_u32m2_vl(__rvv_uint32m2_t arg_0, uint32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u32m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-__rvv_uint32m2_t test_vadd_vx_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, uint32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_uint64m2_t test_vadd_vx_u64m2_vl(__rvv_uint64m2_t arg_0, uint64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u64m2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-__rvv_uint64m2_t test_vadd_vx_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, uint64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_uint8m4_t test_vadd_vx_u8m4_vl(__rvv_uint8m4_t arg_0, uint8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u8m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-__rvv_uint8m4_t test_vadd_vx_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_uint16m4_t test_vadd_vx_u16m4_vl(__rvv_uint16m4_t arg_0, uint16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u16m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-__rvv_uint16m4_t test_vadd_vx_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, uint16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_uint32m4_t test_vadd_vx_u32m4_vl(__rvv_uint32m4_t arg_0, uint32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u32m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-__rvv_uint32m4_t test_vadd_vx_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, uint32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_uint64m4_t test_vadd_vx_u64m4_vl(__rvv_uint64m4_t arg_0, uint64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u64m4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-__rvv_uint64m4_t test_vadd_vx_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, uint64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_uint8m8_t test_vadd_vx_u8m8_vl(__rvv_uint8m8_t arg_0, uint8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u8m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-__rvv_uint8m8_t test_vadd_vx_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_uint16m8_t test_vadd_vx_u16m8_vl(__rvv_uint16m8_t arg_0, uint16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u16m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-__rvv_uint16m8_t test_vadd_vx_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, uint16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_uint32m8_t test_vadd_vx_u32m8_vl(__rvv_uint32m8_t arg_0, uint32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u32m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-__rvv_uint32m8_t test_vadd_vx_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, uint32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_uint64m8_t test_vadd_vx_u64m8_vl(__rvv_uint64m8_t arg_0, uint64_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u64m8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-__rvv_uint64m8_t test_vadd_vx_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, uint64_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_uint8mf2_t test_vadd_vx_u8mf2_vl(__rvv_uint8mf2_t arg_0, uint8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u8mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-__rvv_uint8mf2_t test_vadd_vx_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_uint16mf2_t test_vadd_vx_u16mf2_vl(__rvv_uint16mf2_t arg_0, uint16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u16mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-__rvv_uint16mf2_t test_vadd_vx_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, uint16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_uint32mf2_t test_vadd_vx_u32mf2_vl(__rvv_uint32mf2_t arg_0, uint32_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u32mf2_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-__rvv_uint32mf2_t test_vadd_vx_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, uint32_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_uint8mf4_t test_vadd_vx_u8mf4_vl(__rvv_uint8mf4_t arg_0, uint8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u8mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-__rvv_uint8mf4_t test_vadd_vx_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_uint16mf4_t test_vadd_vx_u16mf4_vl(__rvv_uint16mf4_t arg_0, uint16_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u16mf4_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-__rvv_uint16mf4_t test_vadd_vx_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, uint16_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_uint8mf8_t test_vadd_vx_u8mf8_vl(__rvv_uint8mf8_t arg_0, uint8_t arg_1, size_t arg_2)
-{
-    return __builtin_rvv_vadd_vx_u8mf8_vl(arg_0, arg_1, arg_2);
-}
-
-// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_m_vl(
-// CHECK-RV64-O2-NEXT:  entry:
-// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
-// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_m_vl(
-// CHECK-RV32-O2-NEXT:  entry:
-// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
-// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-__rvv_uint8mf8_t test_vadd_vx_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, uint8_t arg_3, size_t arg_4)
-{
-    return __builtin_rvv_vadd_vx_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
-}

diff  --git a/clang/test/Headers/riscv-vector-header.c b/clang/test/Headers/riscv-vector-header.c
new file mode 100644
index 000000000000..beb54a30ee70
--- /dev/null
+++ b/clang/test/Headers/riscv-vector-header.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -triple riscv64 -fsyntax-only \
+// RUN:   -target-feature +m -target-feature +a -target-feature +f \
+// RUN:   -target-feature +d -target-feature +experimental-v %s
+// expected-no-diagnostics
+
+#include <riscv_vector.h>

diff  --git a/clang/utils/TableGen/CMakeLists.txt b/clang/utils/TableGen/CMakeLists.txt
index 26589ec7e1ce..6379cc4e11e8 100644
--- a/clang/utils/TableGen/CMakeLists.txt
+++ b/clang/utils/TableGen/CMakeLists.txt
@@ -18,6 +18,7 @@ add_tablegen(clang-tblgen CLANG
   ClangTypeNodesEmitter.cpp
   MveEmitter.cpp
   NeonEmitter.cpp
+  RISCVVEmitter.cpp
   SveEmitter.cpp
   TableGen.cpp
   )

diff  --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
new file mode 100644
index 000000000000..3a84590c3a65
--- /dev/null
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -0,0 +1,1074 @@
+//===- RISCVVEmitter.cpp - Generate riscv_vector.h for use with clang -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting riscv_vector.h and
+// riscv_vector_generic.h, which include a declaration and definition of each
+// intrinsic function specified in https://github.com/riscv/rvv-intrinsic-doc.
+//
+// See also the documentation in include/clang/Basic/riscv_vector.td.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/TableGen/Record.h"
+#include <numeric>
+
+using namespace llvm;
+using BasicType = char;
+using VScaleVal = Optional<unsigned>;
+
+namespace {
+
+// Exponential LMUL
+class LMULType {
+private:
+  int Log2LMUL;
+
+public:
+  LMULType(int Log2LMUL);
+  // Return the C/C++ string representation of LMUL
+  std::string str() const;
+  Optional<unsigned> getScale(unsigned ElementBitwidth) const;
+  LMULType &operator*=(unsigned RHS);
+};
+
+// This class is a compact representation of a valid or invalid RVVType.
+class RVVType {
+  enum ScalarTypeKind : uint32_t {
+    Void,
+    Size_t,
+    Ptrdiff_t,
+    Boolean,
+    SignedInteger,
+    UnsignedInteger,
+    Float,
+    Invalid,
+  };
+  BasicType BT;
+  ScalarTypeKind ScalarType = Invalid;
+  LMULType LMUL;
+  bool IsPointer = false;
+  // IsConstant indices are "int", but have the constant expression.
+  bool IsImmediate = false;
+  // Const qualifier for pointer to const object or object of const type.
+  bool IsConstant = false;
+  unsigned ElementBitwidth = 0;
+  VScaleVal Scale = 0;
+  bool Valid;
+
+  std::string BuiltinStr;
+  std::string ClangBuiltinStr;
+  std::string Str;
+  std::string ShortStr;
+
+public:
+  RVVType() : RVVType(BasicType(), 0, StringRef()) {}
+  RVVType(BasicType BT, int Log2LMUL, StringRef prototype);
+
+  // Return the string representation of a type, which is an encoded string for
+  // passing to the BUILTIN() macro in Builtins.def.
+  const std::string &getBuiltinStr() const { return BuiltinStr; }
+
+  // Return the clang builtin type for an RVV vector type, which is used in the
+  // riscv_vector.h header file.
+  const std::string &getClangBuiltinStr() const { return ClangBuiltinStr; }
+
+  // Return the C/C++ string representation of a type for use in the
+  // riscv_vector.h header file.
+  const std::string &getTypeStr() const { return Str; }
+
+  // Return the short name of a type for C/C++ name suffix.
+  const std::string &getShortStr() const { return ShortStr; }
+
+  bool isValid() const { return Valid; }
+  bool isScalar() const { return Scale.hasValue() && Scale.getValue() == 0; }
+  bool isVector() const { return Scale.hasValue() && Scale.getValue() != 0; }
+  bool isFloat() const { return ScalarType == ScalarTypeKind::Float; }
+  bool isSignedInteger() const {
+    return ScalarType == ScalarTypeKind::SignedInteger;
+  }
+  bool isFloatVector(unsigned Width) const {
+    return isVector() && isFloat() && ElementBitwidth == Width;
+  }
+
+private:
+  // Verify RVV vector type and set Valid.
+  bool verifyType() const;
+
+  // Creates a type based on basic types of TypeRange
+  void applyBasicType();
+
+  // Applies a prototype modifier to the current type. The result may be an
+  // invalid type.
+  void applyModifier(StringRef prototype);
+
+  // Compute and record a string for a legal type.
+  void initBuiltinStr();
+  // Compute and record a builtin RVV vector type string.
+  void initClangBuiltinStr();
+  // Compute and record a type string for use in the header.
+  void initTypeStr();
+  // Compute and record a short name of a type for C/C++ name suffix.
+  void initShortStr();
+};
+
+using RVVTypePtr = RVVType *;
+using RVVTypes = std::vector<RVVTypePtr>;
+
+enum RISCVExtension : uint8_t {
+  Basic = 0,
+  F = 1 << 1,
+  D = 1 << 2,
+  Zfh = 1 << 3
+};
+
+// TODO: refactor the RVVIntrinsic class design after all intrinsic
+// combinations are supported. This represents an instantiation of an intrinsic
+// with a particular type and prototype.
+class RVVIntrinsic {
+
+private:
+  std::string Name; // Builtin name
+  std::string MangledName;
+  std::string IRName;
+  bool HasSideEffects;
+  bool HasMaskedOffOperand;
+  bool HasVL;
+  bool HasGeneric;
+  RVVTypePtr OutputType; // Builtin output type
+  RVVTypes InputTypes;   // Builtin input types
+  // The types we use to obtain the specific LLVM intrinsic. They are indices
+  // into InputTypes; -1 means the return type.
+  std::vector<int64_t> IntrinsicTypes;
+  // The C/C++ intrinsic operand order is different from the builtin operand
+  // order. Record the mapping of InputTypes indices.
+  SmallVector<unsigned> CTypeOrder;
+  uint8_t RISCVExtensions = 0;
+
+public:
+  RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
+               StringRef IRName, bool HasSideEffects, bool IsMask,
+               bool HasMaskedOffOperand, bool HasVL, bool HasGeneric,
+               const RVVTypes &Types,
+               const std::vector<int64_t> &RVVIntrinsicTypes);
+  ~RVVIntrinsic() = default;
+
+  StringRef getName() const { return Name; }
+  StringRef getMangledName() const { return MangledName; }
+  bool hasSideEffects() const { return HasSideEffects; }
+  bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
+  bool hasVL() const { return HasVL; }
+  bool hasGeneric() const { return HasGeneric; }
+  size_t getNumOperand() const { return InputTypes.size(); }
+  StringRef getIRName() const { return IRName; }
+  uint8_t getRISCVExtensions() const { return RISCVExtensions; }
+
+  // Return the type string for a BUILTIN() macro in Builtins.def.
+  std::string getBuiltinTypeStr() const;
+
+  // Emit the code block for the switch body in EmitRISCVBuiltinExpr; it
+  // initializes the intrinsic ID and IntrinsicTypes.
+  void emitCodeGenSwitchBody(raw_ostream &o) const;
+
+  // Emit the macros that map C/C++ intrinsic functions to builtin functions.
+  void emitIntrinsicMacro(raw_ostream &o) const;
+
+  // Emit the mangled function definition.
+  void emitMangledFuncDef(raw_ostream &o) const;
+};
+
+class RVVEmitter {
+private:
+  RecordKeeper &Records;
+  // Concat BasicType, LMUL and Proto as key
+  StringMap<RVVType> LegalTypes;
+  StringSet<> IllegalTypes;
+
+public:
+  RVVEmitter(RecordKeeper &R) : Records(R) {}
+
+  /// Emit riscv_vector.h
+  void createHeader(raw_ostream &o);
+
+  /// Emit riscv_vector_generic.h
+  void createGenericHeader(raw_ostream &o);
+
+  /// Emit all the __builtin prototypes and code needed by Sema.
+  void createBuiltins(raw_ostream &o);
+
+  /// Emit all the information needed to map builtin -> LLVM IR intrinsic.
+  void createCodeGen(raw_ostream &o);
+
+private:
+  /// Create all intrinsics and add them to \p Out
+  void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+  /// Compute output and input types by applying different configs (basic type
+  /// and LMUL with type transformers). It also records the resulting type in
+  /// the legal or illegal set to avoid computing the same config again. The
+  /// result may contain an illegal RVVType.
+  Optional<RVVTypes> computeTypes(BasicType BT, int Log2LMUL,
+                                  ArrayRef<std::string> PrototypeSeq);
+  Optional<RVVTypePtr> computeType(BasicType BT, int Log2LMUL, StringRef Proto);
+
+  /// Emit the architecture preprocessor guards and the intrinsic bodies.
+  void emitArchMacroAndBody(
+      std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &o,
+      std::function<void(raw_ostream &, const RVVIntrinsic &)>);
+
+  // Emit the architecture preprocessor definitions. Return true when it emits
+  // a non-empty string.
+  bool emitExtDefStr(uint8_t Extensions, raw_ostream &o);
+};
+
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// Type implementation
+//===----------------------------------------------------------------------===//
+
+LMULType::LMULType(int NewLog2LMUL) {
+  // Check Log2LMUL is -3, -2, -1, 0, 1, 2, 3
+  assert(NewLog2LMUL <= 3 && NewLog2LMUL >= -3 && "Bad LMUL number!");
+  Log2LMUL = NewLog2LMUL;
+}
+
+std::string LMULType::str() const {
+  if (Log2LMUL < 0)
+    return "mf" + utostr(1 << (-Log2LMUL));
+  return "m" + utostr(1 << Log2LMUL);
+}
+
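+// For example, with ElementBitwidth = 32 and LMUL = m1 (Log2LMUL = 0), the
+// scale is 2, which corresponds to nxv2i32 in the LMUL table further below.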
+VScaleVal LMULType::getScale(unsigned ElementBitwidth) const {
+  int Log2ScaleResult = 0;
+  switch (ElementBitwidth) {
+  default:
+    break;
+  case 8:
+    Log2ScaleResult = Log2LMUL + 3;
+    break;
+  case 16:
+    Log2ScaleResult = Log2LMUL + 2;
+    break;
+  case 32:
+    Log2ScaleResult = Log2LMUL + 1;
+    break;
+  case 64:
+    Log2ScaleResult = Log2LMUL;
+    break;
+  }
+  // Illegal vscale result would be less than 1
+  if (Log2ScaleResult < 0)
+    return None;
+  return 1 << Log2ScaleResult;
+}
+
+LMULType &LMULType::operator*=(uint32_t RHS) {
+  assert(isPowerOf2_32(RHS));
+  this->Log2LMUL = this->Log2LMUL + Log2_32(RHS);
+  return *this;
+}
+
+RVVType::RVVType(BasicType BT, int Log2LMUL, StringRef prototype)
+    : BT(BT), LMUL(LMULType(Log2LMUL)) {
+  applyBasicType();
+  applyModifier(prototype);
+  Valid = verifyType();
+  if (Valid) {
+    initBuiltinStr();
+    initTypeStr();
+    if (isVector()) {
+      initClangBuiltinStr();
+      initShortStr();
+    }
+  }
+}
+
+// clang-format off
+// Boolean types are encoded by the ratio n = SEW/LMUL, as follows:
+// SEW/LMUL | 1         | 2         | 4         | 8        | 16        | 32        | 64
+// c type   | vbool64_t | vbool32_t | vbool16_t | vbool8_t | vbool4_t  | vbool2_t  | vbool1_t
+// IR type  | nxv1i1    | nxv2i1    | nxv4i1    | nxv8i1   | nxv16i1   | nxv32i1   | nxv64i1
+
+// type\lmul | 1/8    | 1/4      | 1/2     | 1       | 2        | 4        | 8
+// --------  |------  | -------- | ------- | ------- | -------- | -------- | --------
+// i64       | N/A    | N/A      | N/A     | nxv1i64 | nxv2i64  | nxv4i64  | nxv8i64
+// i32       | N/A    | N/A      | nxv1i32 | nxv2i32 | nxv4i32  | nxv8i32  | nxv16i32
+// i16       | N/A    | nxv1i16  | nxv2i16 | nxv4i16 | nxv8i16  | nxv16i16 | nxv32i16
+// i8        | nxv1i8 | nxv2i8   | nxv4i8  | nxv8i8  | nxv16i8  | nxv32i8  | nxv64i8
+// double    | N/A    | N/A      | N/A     | nxv1f64 | nxv2f64  | nxv4f64  | nxv8f64
+// float     | N/A    | N/A      | nxv1f32 | nxv2f32 | nxv4f32  | nxv8f32  | nxv16f32
+// half      | N/A    | nxv1f16  | nxv2f16 | nxv4f16 | nxv8f16  | nxv16f16 | nxv32f16
+// clang-format on
+
+bool RVVType::verifyType() const {
+  if (isScalar())
+    return true;
+  if (!Scale.hasValue())
+    return false;
+  if (isFloat() && ElementBitwidth == 8)
+    return false;
+  unsigned V = Scale.getValue();
+  switch (ElementBitwidth) {
+  case 1:
+  case 8:
+    // Check Scale is 1,2,4,8,16,32,64
+    return (V <= 64 && isPowerOf2_32(V));
+  case 16:
+    // Check Scale is 1,2,4,8,16,32
+    return (V <= 32 && isPowerOf2_32(V));
+  case 32:
+    // Check Scale is 1,2,4,8,16
+    return (V <= 16 && isPowerOf2_32(V));
+  case 64:
+    // Check Scale is 1,2,4,8
+    return (V <= 8 && isPowerOf2_32(V));
+  }
+  return false;
+}
+
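+// For example, vint32m1_t (signed 32-bit elements, scale 2) is encoded as
+// "q2Si" and the size_t vl operand as "z", matching the type strings consumed
+// by the RISCVV_BUILTIN()/BUILTIN() macros.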
+void RVVType::initBuiltinStr() {
+  assert(isValid() && "RVVType is invalid");
+  switch (ScalarType) {
+  case ScalarTypeKind::Void:
+    BuiltinStr = "v";
+    return;
+  case ScalarTypeKind::Size_t:
+    BuiltinStr = "z";
+    if (IsImmediate)
+      BuiltinStr = "I" + BuiltinStr;
+    return;
+  case ScalarTypeKind::Ptrdiff_t:
+    BuiltinStr = "Y";
+    return;
+  case ScalarTypeKind::Boolean:
+    assert(ElementBitwidth == 1);
+    BuiltinStr += "b";
+    break;
+  case ScalarTypeKind::SignedInteger:
+  case ScalarTypeKind::UnsignedInteger:
+    switch (ElementBitwidth) {
+    case 8:
+      BuiltinStr += "c";
+      break;
+    case 16:
+      BuiltinStr += "s";
+      break;
+    case 32:
+      BuiltinStr += "i";
+      break;
+    case 64:
+      BuiltinStr += "Wi";
+      break;
+    default:
+      llvm_unreachable("Unhandled ElementBitwidth!");
+    }
+    if (isSignedInteger())
+      BuiltinStr = "S" + BuiltinStr;
+    else
+      BuiltinStr = "U" + BuiltinStr;
+    break;
+  case ScalarTypeKind::Float:
+    switch (ElementBitwidth) {
+    case 16:
+      BuiltinStr += "h";
+      break;
+    case 32:
+      BuiltinStr += "f";
+      break;
+    case 64:
+      BuiltinStr += "d";
+      break;
+    default:
+      llvm_unreachable("Unhandled ElementBitwidth!");
+    }
+    break;
+  default:
+    llvm_unreachable("ScalarType is invalid!");
+  }
+  if (IsImmediate)
+    BuiltinStr = "I" + BuiltinStr;
+  if (isScalar()) {
+    if (IsConstant)
+      BuiltinStr += "C";
+    if (IsPointer)
+      BuiltinStr += "*";
+    return;
+  }
+  BuiltinStr = "q" + utostr(Scale.getValue()) + BuiltinStr;
+}
+
+void RVVType::initClangBuiltinStr() {
+  assert(isValid() && "RVVType is invalid");
+  assert(isVector() && "Handle Vector type only");
+
+  ClangBuiltinStr = "__rvv_";
+  switch (ScalarType) {
+  case ScalarTypeKind::Boolean:
+    ClangBuiltinStr += "bool" + utostr(64 / Scale.getValue()) + "_t";
+    return;
+  case ScalarTypeKind::Float:
+    ClangBuiltinStr += "float";
+    break;
+  case ScalarTypeKind::SignedInteger:
+    ClangBuiltinStr += "int";
+    break;
+  case ScalarTypeKind::UnsignedInteger:
+    ClangBuiltinStr += "uint";
+    break;
+  default:
+    llvm_unreachable("ScalarTypeKind is invalid");
+  }
+  ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t";
+}
+
+void RVVType::initTypeStr() {
+  assert(isValid() && "RVVType is invalid");
+
+  if (IsConstant)
+    Str += "const ";
+
+  auto getTypeString = [&](StringRef TypeStr) {
+    if (isScalar())
+      return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
+    return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t")
+        .str();
+  };
+
+  switch (ScalarType) {
+  case ScalarTypeKind::Void:
+    Str = "void";
+    return;
+  case ScalarTypeKind::Size_t:
+    Str = "size_t";
+    return;
+  case ScalarTypeKind::Ptrdiff_t:
+    Str = "ptrdiff_t";
+    return;
+  case ScalarTypeKind::Boolean:
+    if (isScalar())
+      Str += "bool";
+    else
+      // Vector bool is a special case; the formula is
+      // `vbool<N>_t = MVT::nxv<64/N>i1`, e.g. vbool16_t = MVT::nxv4i1
+      Str += "vbool" + utostr(64 / Scale.getValue()) + "_t";
+    break;
+  case ScalarTypeKind::Float:
+    if (isScalar()) {
+      if (ElementBitwidth == 64)
+        Str += "double";
+      else if (ElementBitwidth == 32)
+        Str += "float";
+      assert((ElementBitwidth == 32 || ElementBitwidth == 64) &&
+             "Unhandled floating type");
+    } else
+      Str += getTypeString("float");
+    break;
+  case ScalarTypeKind::SignedInteger:
+    Str += getTypeString("int");
+    break;
+  case ScalarTypeKind::UnsignedInteger:
+    Str += getTypeString("uint");
+    break;
+  default:
+    llvm_unreachable("ScalarType is invalid!");
+  }
+  if (IsPointer)
+    Str += " *";
+}
+
+void RVVType::initShortStr() {
+  assert(isVector() && "only handle vector type");
+  switch (ScalarType) {
+  case ScalarTypeKind::Boolean:
+    ShortStr = "b" + utostr(64 / Scale.getValue());
+    break;
+  case ScalarTypeKind::Float:
+    ShortStr = "f" + utostr(ElementBitwidth) + LMUL.str();
+    break;
+  case ScalarTypeKind::SignedInteger:
+    ShortStr = "i" + utostr(ElementBitwidth) + LMUL.str();
+    break;
+  case ScalarTypeKind::UnsignedInteger:
+    ShortStr = "u" + utostr(ElementBitwidth) + LMUL.str();
+    break;
+  default:
+    llvm_unreachable("Unhandled case!");
+  }
+}
+
+void RVVType::applyBasicType() {
+  switch (BT) {
+  case 'c':
+    ElementBitwidth = 8;
+    ScalarType = ScalarTypeKind::SignedInteger;
+    break;
+  case 's':
+    ElementBitwidth = 16;
+    ScalarType = ScalarTypeKind::SignedInteger;
+    break;
+  case 'i':
+    ElementBitwidth = 32;
+    ScalarType = ScalarTypeKind::SignedInteger;
+    break;
+  case 'l':
+    ElementBitwidth = 64;
+    ScalarType = ScalarTypeKind::SignedInteger;
+    break;
+  case 'h':
+    ElementBitwidth = 16;
+    ScalarType = ScalarTypeKind::Float;
+    break;
+  case 'f':
+    ElementBitwidth = 32;
+    ScalarType = ScalarTypeKind::Float;
+    break;
+  case 'd':
+    ElementBitwidth = 64;
+    ScalarType = ScalarTypeKind::Float;
+    break;
+  default:
+    llvm_unreachable("Unhandled type code!");
+  }
+  assert(ElementBitwidth != 0 && "Bad element bitwidth!");
+}
+
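+// For example, with BasicType 'i' (32-bit signed int) and LMUL = m1, the
+// prototype "v" yields vint32m1_t, "Uv" yields vuint32m1_t, and "e" yields
+// the scalar element type int32_t.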
+void RVVType::applyModifier(StringRef Transformer) {
+  if (Transformer.empty())
+    return;
+  // Handle primitive type transformer
+  switch (Transformer.back()) {
+  case 'e':
+    Scale = 0;
+    break;
+  case 'v':
+    Scale = LMUL.getScale(ElementBitwidth);
+    break;
+  case 'w':
+    ElementBitwidth *= 2;
+    LMUL *= 2;
+    Scale = LMUL.getScale(ElementBitwidth);
+    break;
+  case 'q':
+    ElementBitwidth *= 4;
+    LMUL *= 4;
+    Scale = LMUL.getScale(ElementBitwidth);
+    break;
+  case 'o':
+    ElementBitwidth *= 8;
+    LMUL *= 8;
+    Scale = LMUL.getScale(ElementBitwidth);
+    break;
+  case 'm':
+    ScalarType = ScalarTypeKind::Boolean;
+    Scale = LMUL.getScale(ElementBitwidth);
+    ElementBitwidth = 1;
+    break;
+  case '0':
+    ScalarType = ScalarTypeKind::Void;
+    break;
+  case 'z':
+    ScalarType = ScalarTypeKind::Size_t;
+    break;
+  case 't':
+    ScalarType = ScalarTypeKind::Ptrdiff_t;
+    break;
+  case 'c': // uint8_t
+    ScalarType = ScalarTypeKind::UnsignedInteger;
+    ElementBitwidth = 8;
+    Scale = 0;
+    break;
+  default:
+    llvm_unreachable("Illegal primitive type transformers!");
+  }
+  Transformer = Transformer.drop_back();
+
+  // Compute type transformers
+  for (char I : Transformer) {
+    switch (I) {
+    case 'P':
+      IsPointer = true;
+      break;
+    case 'C':
+      IsConstant = true;
+      break;
+    case 'K':
+      IsImmediate = true;
+      break;
+    case 'U':
+      ScalarType = ScalarTypeKind::UnsignedInteger;
+      break;
+    case 'I':
+      ScalarType = ScalarTypeKind::SignedInteger;
+      break;
+    case 'F':
+      ScalarType = ScalarTypeKind::Float;
+      break;
+    case 'S':
+      LMUL = LMULType(0);
+      // Updating ElementBitwidth requires updating Scale too.
+      Scale = LMUL.getScale(ElementBitwidth);
+      break;
+    default:
+      llvm_unreachable("Illegal non-primitive type transformer!");
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// RVVIntrinsic implementation
+//===----------------------------------------------------------------------===//
+RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
+                           StringRef NewMangledName, StringRef IRName,
+                           bool HasSideEffects, bool IsMask,
+                           bool HasMaskedOffOperand, bool HasVL,
+                           bool HasGeneric, const RVVTypes &OutInTypes,
+                           const std::vector<int64_t> &NewIntrinsicTypes)
+    : IRName(IRName), HasSideEffects(HasSideEffects),
+      HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
+      HasGeneric(HasGeneric) {
+
+  // Init Name and MangledName
+  Name = NewName.str();
+  if (NewMangledName.empty())
+    MangledName = NewName.split("_").first.str();
+  else
+    MangledName = NewMangledName.str();
+  if (!Suffix.empty())
+    Name += "_" + Suffix.str();
+  if (IsMask) {
+    Name += "_m";
+    MangledName += "_m";
+  }
+  // Init RISC-V extensions
+  for (const auto &T : OutInTypes) {
+    if (T->isFloatVector(16))
+      RISCVExtensions |= RISCVExtension::Zfh;
+    else if (T->isFloatVector(32))
+      RISCVExtensions |= RISCVExtension::F;
+    else if (T->isFloatVector(64))
+      RISCVExtensions |= RISCVExtension::D;
+  }
+
+  // Init OutputType and InputTypes
+  OutputType = OutInTypes[0];
+  InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
+  CTypeOrder.resize(InputTypes.size());
+  std::iota(CTypeOrder.begin(), CTypeOrder.end(), 0);
+  if (IsMask) {
+    if (HasVL)
+      // Builtin type order: op0, op1, ..., mask, vl
+      // C type order: mask, op0, op1, ..., vl
+      std::rotate(CTypeOrder.begin(), CTypeOrder.end() - 2,
+                  CTypeOrder.end() - 1);
+    else
+      // Builtin type order: op0, op1, ..., mask
+      // C type order: mask, op0, op1, ...,
+      std::rotate(CTypeOrder.begin(), CTypeOrder.end() - 1, CTypeOrder.end());
+  }
+  // IntrinsicTypes holds indices for the non-masked version. They need to be
+  // shifted if there is a maskedoff operand (it is always the first operand).
+  IntrinsicTypes = NewIntrinsicTypes;
+  if (IsMask && HasMaskedOffOperand) {
+    for (auto &I : IntrinsicTypes) {
+      if (I >= 0)
+        I += 1;
+    }
+  }
+}
+
+std::string RVVIntrinsic::getBuiltinTypeStr() const {
+  std::string S;
+  S += OutputType->getBuiltinStr();
+  for (const auto &T : InputTypes) {
+    S += T->getBuiltinStr();
+  }
+  return S;
+}
+
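+// The emitted fragment for a group of builtins sharing an IR name looks
+// roughly like (names are illustrative):
+//   case RISCV::BI__builtin_rvv_<name>:
+//     ID = Intrinsic::riscv_<irname>;
+//     IntrinsicTypes = {ResultType, Ops[<idx>]->getType()};
+//     break;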
+void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
+  OS << "  ID = Intrinsic::riscv_" + getIRName() + ";\n";
+  OS << "  IntrinsicTypes = {";
+  ListSeparator LS;
+  for (const auto &Idx : IntrinsicTypes) {
+    if (Idx == -1)
+      OS << LS << "ResultType";
+    else
+      OS << LS << "Ops[" << Idx << "]->getType()";
+  }
+
+  // VL could be i64 or i32, so it needs to be encoded in IntrinsicTypes. VL is
+  // always the last operand.
+  if (hasVL())
+    OS << ", Ops[" << getNumOperand() - 1 << "]->getType()";
+  OS << "};\n";
+  OS << "  break;\n";
+}
+
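+// Emitted macro shape (illustrative, non-masked case):
+//   #define <name>(op0, op1, ...) \
+//   __builtin_rvv_<name>((<type0>)(op0), (<type1>)(op1), ...)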
+void RVVIntrinsic::emitIntrinsicMacro(raw_ostream &OS) const {
+  OS << "#define " << getName() << "(";
+  if (getNumOperand() > 0) {
+    ListSeparator LS;
+    for (const auto &I : CTypeOrder)
+      OS << LS << "op" << I;
+  }
+  OS << ") \\\n";
+  OS << "__builtin_rvv_" << getName() << "(";
+  if (getNumOperand() > 0) {
+    ListSeparator LS;
+    for (unsigned i = 0; i < InputTypes.size(); ++i)
+      OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
+  }
+  OS << ")\n";
+}
+
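+// Emitted overloaded wrapper shape (illustrative):
+//   <return-type> <mangled-name>(<arg-type> op0, ...){
+//     return <name>(op0, ...);
+//   }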
+void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
+  OS << OutputType->getTypeStr() << " " << getMangledName() << "(";
+  // Emit function arguments
+  if (getNumOperand() > 0) {
+    ListSeparator LS;
+    for (unsigned i = 0; i < CTypeOrder.size(); ++i)
+      OS << LS << InputTypes[CTypeOrder[i]]->getTypeStr() << " op" << i;
+  }
+  OS << "){\n";
+  OS << "  return " << getName() << "(";
+  // Emit parameter variables
+  if (getNumOperand() > 0) {
+    ListSeparator LS;
+    for (unsigned i = 0; i < CTypeOrder.size(); ++i)
+      OS << LS << "op" << i;
+  }
+  OS << ");\n";
+  OS << "}\n\n";
+}
+
+//===----------------------------------------------------------------------===//
+// RVVEmitter implementation
+//===----------------------------------------------------------------------===//
+void RVVEmitter::createHeader(raw_ostream &OS) {
+
+  OS << "/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics "
+        "-------------------===\n"
+        " *\n"
+        " *\n"
+        " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+        "Exceptions.\n"
+        " * See https://llvm.org/LICENSE.txt for license information.\n"
+        " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+        " *\n"
+        " *===-----------------------------------------------------------------"
+        "------===\n"
+        " */\n\n";
+
+  OS << "#ifndef __RISCV_VECTOR_H\n";
+  OS << "#define __RISCV_VECTOR_H\n\n";
+
+  OS << "#include <stdint.h>\n";
+  OS << "#include <stddef.h>\n\n";
+
+  OS << "#ifndef __riscv_vector\n";
+  OS << "#error \"Vector intrinsics require the vector extension.\"\n";
+  OS << "#endif\n\n";
+
+  OS << "#ifdef __cplusplus\n";
+  OS << "extern \"C\" {\n";
+  OS << "#endif\n\n";
+
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  auto printType = [&](auto T) {
+    OS << "typedef " << T->getClangBuiltinStr() << " " << T->getTypeStr()
+       << ";\n";
+  };
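+  // Each emitted typedef pairs the internal builtin type with its public name,
+  // e.g. "typedef __rvv_int32m1_t vint32m1_t;".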
+
+  constexpr int Log2LMULs[] = {-3, -2, -1, 0, 1, 2, 3};
+  // Print RVV boolean types.
+  for (int Log2LMUL : Log2LMULs) {
+    auto T = computeType('c', Log2LMUL, "m");
+    if (T.hasValue())
+      printType(T.getValue());
+  }
+  // Print RVV int/float types.
+  for (char I : StringRef("csil")) {
+    for (int Log2LMUL : Log2LMULs) {
+      auto T = computeType(I, Log2LMUL, "v");
+      if (T.hasValue()) {
+        printType(T.getValue());
+        auto UT = computeType(I, Log2LMUL, "Uv");
+        printType(UT.getValue());
+      }
+    }
+  }
+  OS << "#if defined(__riscv_zfh)\n";
+  for (int Log2LMUL : Log2LMULs) {
+    auto T = computeType('h', Log2LMUL, "v");
+    if (T.hasValue())
+      printType(T.getValue());
+  }
+  OS << "#endif\n";
+
+  OS << "#if defined(__riscv_f)\n";
+  for (int Log2LMUL : Log2LMULs) {
+    auto T = computeType('f', Log2LMUL, "v");
+    if (T.hasValue())
+      printType(T.getValue());
+  }
+  OS << "#endif\n";
+
+  OS << "#if defined(__riscv_d)\n";
+  for (int ELMul : Log2LMULs) {
+    auto T = computeType('d', ELMul, "v");
+    if (T.hasValue())
+      printType(T.getValue());
+  }
+  OS << "#endif\n\n";
+
+  // Print the intrinsic function macros.
+  emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+    Inst.emitIntrinsicMacro(OS);
+  });
+
+  OS << "\n#ifdef __cplusplus\n";
+  OS << "}\n";
+  OS << "#endif // __riscv_vector\n";
+  OS << "#endif // __RISCV_VECTOR_H\n";
+}
+
+void RVVEmitter::createGenericHeader(raw_ostream &OS) {
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  OS << "#include <riscv_vector.h>\n\n";
+  // Print the overloaded intrinsic function definitions.
+  emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+    if (!Inst.hasGeneric())
+      return;
+    OS << "static inline __attribute__((__always_inline__, __nodebug__, "
+          "__overloadable__))\n";
+    Inst.emitMangledFuncDef(OS);
+  });
+}
+
+void RVVEmitter::createBuiltins(raw_ostream &OS) {
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  OS << "#if defined(TARGET_BUILTIN) && !defined(RISCVV_BUILTIN)\n";
+  OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) TARGET_BUILTIN(ID, TYPE, "
+        "ATTRS, \"experimental-v\")\n";
+  OS << "#endif\n";
+  for (auto &Def : Defs) {
+    OS << "RISCVV_BUILTIN(__builtin_rvv_" << Def->getName() << ",\""
+       << Def->getBuiltinTypeStr() << "\", ";
+    if (!Def->hasSideEffects())
+      OS << "\"n\")\n";
+    else
+      OS << "\"\")\n";
+  }
+  OS << "\n#undef BUILTIN\n";
+  OS << "#undef RISCVV_BUILTIN\n";
+}
+
+void RVVEmitter::createCodeGen(raw_ostream &OS) {
+  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+  createRVVIntrinsics(Defs);
+
+  // Intrinsics with the same IR name share the same switch body.
+  std::stable_sort(Defs.begin(), Defs.end(),
+                   [](const std::unique_ptr<RVVIntrinsic> &A,
+                      const std::unique_ptr<RVVIntrinsic> &B) {
+                     return A->getIRName() < B->getIRName();
+                   });
+  // Print the switch body when the IR name changes from the previous
+  // iteration.
+  RVVIntrinsic *PrevDef = Defs.begin()->get();
+  for (auto &Def : Defs) {
+    StringRef CurIRName = Def->getIRName();
+    if (CurIRName != PrevDef->getIRName()) {
+      PrevDef->emitCodeGenSwitchBody(OS);
+    }
+    PrevDef = Def.get();
+    OS << "case RISCV::BI__builtin_rvv_" << Def->getName() << ":\n";
+  }
+  Defs.back()->emitCodeGenSwitchBody(OS);
+  OS << "\n";
+}
+
+void RVVEmitter::createRVVIntrinsics(
+    std::vector<std::unique_ptr<RVVIntrinsic>> &Out) {
+
+  std::vector<Record *> RV = Records.getAllDerivedDefinitions("RVVBuiltin");
+  for (auto *R : RV) {
+    StringRef Name = R->getValueAsString("Name");
+    StringRef Suffix = R->getValueAsString("Suffix");
+    StringRef MangledName = R->getValueAsString("MangledName");
+    StringRef Prototypes = R->getValueAsString("Prototype");
+    StringRef TypeRange = R->getValueAsString("TypeRange");
+    bool HasMask = R->getValueAsBit("HasMask");
+    bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
+    bool HasVL = R->getValueAsBit("HasVL");
+    bool HasGeneric = R->getValueAsBit("HasGeneric");
+    bool HasSideEffects = R->getValueAsBit("HasSideEffects");
+    std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
+    std::vector<int64_t> IntrinsicTypes =
+        R->getValueAsListOfInts("IntrinsicTypes");
+    StringRef IRName = R->getValueAsString("IRName");
+    StringRef IRNameMask = R->getValueAsString("IRNameMask");
+
+    // Parse the prototype and create a list of primitive types with
+    // transformers (operands) in ProtoSeq. ProtoSeq[0] is the output operand.
+    SmallVector<std::string, 8> ProtoSeq;
+    const StringRef Primaries("evwqom0ztc");
+    while (!Prototypes.empty()) {
+      auto Idx = Prototypes.find_first_of(Primaries);
+      assert(Idx != StringRef::npos);
+      ProtoSeq.push_back(Prototypes.slice(0, Idx + 1).str());
+      Prototypes = Prototypes.drop_front(Idx + 1);
+    }
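+    // For example, a prototype string such as "vvv" is split into
+    // {"v", "v", "v"}: a vector result followed by two vector operands of the
+    // same type; modifier letters stay attached to their primitive (e.g. "Uv").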
+
+    // Compute Builtin types
+    SmallVector<std::string, 8> ProtoMaskSeq = ProtoSeq;
+    if (HasMask) {
+      // If HasMask, append 'm' as the last operand.
+      ProtoMaskSeq.push_back("m");
+      // If HasMaskedOffOperand, insert result type as first input operand.
+      if (HasMaskedOffOperand)
+        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, ProtoSeq[0]);
+    }
+    // If HasVL, append 'z' as the last operand.
+    if (HasVL) {
+      ProtoSeq.push_back("z");
+      ProtoMaskSeq.push_back("z");
+    }
+
+    // Create intrinsics for each type and LMUL.
+    for (char I : TypeRange) {
+      for (int Log2LMUL : Log2LMULList) {
+        Optional<RVVTypes> Types = computeTypes(I, Log2LMUL, ProtoSeq);
+        // Skip creating a new intrinsic if there are any illegal types.
+        if (!Types.hasValue())
+          continue;
+
+        auto SuffixStr =
+            computeType(I, Log2LMUL, Suffix).getValue()->getShortStr();
+        // Create a non-mask intrinsic.
+        Out.push_back(std::make_unique<RVVIntrinsic>(
+            Name, SuffixStr, MangledName, IRName, HasSideEffects,
+            /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasGeneric,
+            Types.getValue(), IntrinsicTypes));
+        if (HasMask) {
+          // Create a mask intrinsic
+          Optional<RVVTypes> MaskTypes =
+              computeTypes(I, Log2LMUL, ProtoMaskSeq);
+          Out.push_back(std::make_unique<RVVIntrinsic>(
+              Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
+              /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasGeneric,
+              MaskTypes.getValue(), IntrinsicTypes));
+        }
+      } // end for Log2LMUL
+    }   // end for TypeRange
+  }
+}
+
+Optional<RVVTypes>
+RVVEmitter::computeTypes(BasicType BT, int Log2LMUL,
+                         ArrayRef<std::string> PrototypeSeq) {
+  RVVTypes Types;
+  for (const std::string &Proto : PrototypeSeq) {
+    auto T = computeType(BT, Log2LMUL, Proto);
+    if (!T.hasValue())
+      return llvm::None;
+    // Record legal type index
+    Types.push_back(T.getValue());
+  }
+  return Types;
+}
+
+Optional<RVVTypePtr> RVVEmitter::computeType(BasicType BT, int Log2LMUL,
+                                             StringRef Proto) {
+  std::string Idx = Twine(Twine(BT) + Twine(Log2LMUL) + Proto).str();
+  // Search first
+  auto It = LegalTypes.find(Idx);
+  if (It != LegalTypes.end())
+    return &(It->second);
+  if (IllegalTypes.count(Idx))
+    return llvm::None;
+  // Compute type and record the result.
+  RVVType T(BT, Log2LMUL, Proto);
+  if (T.isValid()) {
+    // Record legal type index and value.
+    LegalTypes.insert({Idx, T});
+    return &(LegalTypes[Idx]);
+  }
+  // Record illegal type index.
+  IllegalTypes.insert(Idx);
+  return llvm::None;
+}
+
+void RVVEmitter::emitArchMacroAndBody(
+    std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS,
+    std::function<void(raw_ostream &, const RVVIntrinsic &)> PrintBody) {
+
+  // Intrinsics requiring the same extensions go inside the same arch guard
+  // macro.
+  std::stable_sort(Defs.begin(), Defs.end(),
+                   [](const std::unique_ptr<RVVIntrinsic> &A,
+                      const std::unique_ptr<RVVIntrinsic> &B) {
+                     return A->getRISCVExtensions() < B->getRISCVExtensions();
+                   });
+  uint8_t PrevExt = (*Defs.begin())->getRISCVExtensions();
+  bool NeedEndif = emitExtDefStr(PrevExt, OS);
+  for (auto &Def : Defs) {
+    uint8_t CurExt = Def->getRISCVExtensions();
+    if (CurExt != PrevExt) {
+      if (NeedEndif)
+        OS << "#endif\n\n";
+      NeedEndif = emitExtDefStr(CurExt, OS);
+      PrevExt = CurExt;
+    }
+    PrintBody(OS, *Def);
+  }
+  if (NeedEndif)
+    OS << "#endif\n\n";
+}
+
+bool RVVEmitter::emitExtDefStr(uint8_t Extents, raw_ostream &OS) {
+  if (Extents == RISCVExtension::Basic)
+    return false;
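+  // For a definition that only requires the F extension this emits, e.g.,
+  // "#if defined(__riscv_f)".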
+  OS << "#if ";
+  ListSeparator LS(" || ");
+  if (Extents & RISCVExtension::F)
+    OS << LS << "defined(__riscv_f)";
+  if (Extents & RISCVExtension::D)
+    OS << LS << "defined(__riscv_d)";
+  if (Extents & RISCVExtension::Zfh)
+    OS << LS << "defined(__riscv_zfh)";
+  OS << "\n";
+  return true;
+}
+
+namespace clang {
+void EmitRVVHeader(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createHeader(OS);
+}
+
+void EmitRVVGenericHeader(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createGenericHeader(OS);
+}
+
+void EmitRVVBuiltins(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createBuiltins(OS);
+}
+
+void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createCodeGen(OS);
+}
+
+} // End namespace clang

diff  --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp
index 1b919a77988b..645157aef419 100644
--- a/clang/utils/TableGen/TableGen.cpp
+++ b/clang/utils/TableGen/TableGen.cpp
@@ -83,6 +83,10 @@ enum ActionType {
   GenArmCdeBuiltinSema,
   GenArmCdeBuiltinCG,
   GenArmCdeBuiltinAliases,
+  GenRISCVVectorHeader,
+  GenRISCVVectorGenericHeader,
+  GenRISCVVectorBuiltins,
+  GenRISCVVectorBuiltinCG,
   GenAttrDocs,
   GenDiagDocs,
   GenOptDocs,
@@ -228,6 +232,15 @@ cl::opt<ActionType> Action(
                    "Generate ARM CDE builtin code-generator for clang"),
         clEnumValN(GenArmCdeBuiltinAliases, "gen-arm-cde-builtin-aliases",
                    "Generate list of valid ARM CDE builtin aliases for clang"),
+        clEnumValN(GenRISCVVectorHeader, "gen-riscv-vector-header",
+                   "Generate riscv_vector.h for clang"),
+        clEnumValN(GenRISCVVectorGenericHeader,
+                   "gen-riscv-vector-generic-header",
+                   "Generate riscv_vector_generic.h for clang"),
+        clEnumValN(GenRISCVVectorBuiltins, "gen-riscv-vector-builtins",
+                   "Generate riscv_vector_builtins.inc for clang"),
+        clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
+                   "Generate riscv_vector_builtin_cg.inc for clang"),
         clEnumValN(GenAttrDocs, "gen-attr-docs",
                    "Generate attribute documentation"),
         clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -428,6 +441,18 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
   case GenArmCdeBuiltinAliases:
     EmitCdeBuiltinAliases(Records, OS);
     break;
+  case GenRISCVVectorHeader:
+    EmitRVVHeader(Records, OS);
+    break;
+  case GenRISCVVectorGenericHeader:
+    EmitRVVGenericHeader(Records, OS);
+    break;
+  case GenRISCVVectorBuiltins:
+    EmitRVVBuiltins(Records, OS);
+    break;
+  case GenRISCVVectorBuiltinCG:
+    EmitRVVBuiltinCG(Records, OS);
+    break;
   case GenAttrDocs:
     EmitClangAttrDocs(Records, OS);
     break;

diff  --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h
index 33a06bfe4469..5df149ebda19 100644
--- a/clang/utils/TableGen/TableGenBackends.h
+++ b/clang/utils/TableGen/TableGenBackends.h
@@ -106,6 +106,11 @@ void EmitMveBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitMveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 
+void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVGenericHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
 void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);

diff  --git a/llvm/docs/CommandGuide/tblgen.rst b/llvm/docs/CommandGuide/tblgen.rst
index 042d64953cfe..dfe93e04ccb5 100644
--- a/llvm/docs/CommandGuide/tblgen.rst
+++ b/llvm/docs/CommandGuide/tblgen.rst
@@ -541,6 +541,22 @@ clang-tblgen Options
 
   Generate list of valid ARM CDE builtin aliases for Clang.
 
+.. option:: -gen-riscv-vector-header
+
+  Generate ``riscv_vector.h`` for Clang.
+
+.. option:: -gen-riscv-vector-generic-header
+
+  Generate ``riscv_vector_generic.h`` for Clang.
+
+.. option:: -gen-riscv-vector-builtins
+
+  Generate ``riscv_vector_builtins.inc`` for Clang.
+
+.. option:: -gen-riscv-vector-builtin-codegen
+
+  Generate ``riscv_vector_builtin_cg.inc`` for Clang.
+
 .. option:: -gen-attr-docs
 
   Generate attribute documentation.


        

