[clang] 1a35a1b - [RISCV] Add vadd with mask and without mask builtin.

Hsiangkai Wang via cfe-commits cfe-commits at lists.llvm.org
Tue Feb 23 15:59:33 PST 2021


Author: Hsiangkai Wang
Date: 2021-02-24T07:57:31+08:00
New Revision: 1a35a1b0748639a0014eb8aec1a9c36e330c5316

URL: https://github.com/llvm/llvm-project/commit/1a35a1b0748639a0014eb8aec1a9c36e330c5316
DIFF: https://github.com/llvm/llvm-project/commit/1a35a1b0748639a0014eb8aec1a9c36e330c5316.diff

LOG: [RISCV] Add vadd with mask and without mask builtin.

Demonstrate how to add RISC-V V builtins and lower them to IR intrinsics for the V extension.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.wang at sifive.com>

Differential Revision: https://reviews.llvm.org/D93446
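
For readers following the example, a minimal usage sketch (not part of the patch; the wrapper
function names are illustrative). It shows how the unmasked and masked vadd builtins added below
are called on the RVV scalable vector types: the vector length is the trailing operand, and the
masked form takes the mask and maskedoff (merge) operands first. As in the vadd.c test, the
'experimental-v' target feature must be enabled (e.g. %clang_cc1 -triple riscv64
-target-feature +experimental-v):

    #include <stddef.h>

    // Unmasked vector-vector add: operands are (op1, op2, vl).
    __rvv_int32m1_t add_i32m1(__rvv_int32m1_t op1, __rvv_int32m1_t op2, size_t vl) {
      return __builtin_rvv_vadd_vv_i32m1_vl(op1, op2, vl);
    }

    // Masked vector-vector add: operands are (mask, maskedoff, op1, op2, vl);
    // elements where the mask is clear are taken from maskedoff.
    __rvv_int32m1_t add_i32m1_m(__rvv_bool32_t mask, __rvv_int32m1_t maskedoff,
                                __rvv_int32m1_t op1, __rvv_int32m1_t op2, size_t vl) {
      return __builtin_rvv_vadd_vv_i32m1_m_vl(mask, maskedoff, op1, op2, vl);
    }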

Added: 
    clang/include/clang/Basic/BuiltinsRISCV.def
    clang/test/CodeGen/RISCV/vadd.c

Modified: 
    clang/include/clang/Basic/DiagnosticSemaKinds.td
    clang/include/clang/Basic/TargetBuiltins.h
    clang/include/clang/Sema/Sema.h
    clang/include/clang/module.modulemap
    clang/lib/Basic/Targets/RISCV.cpp
    clang/lib/Basic/Targets/RISCV.h
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/lib/CodeGen/CodeGenFunction.h
    clang/lib/Sema/SemaChecking.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def
new file mode 100644
index 000000000000..4f4ed74a11d2
--- /dev/null
+++ b/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -0,0 +1,196 @@
+//==- BuiltinsRISCV.def - RISC-V Builtin function database -------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RISC-V-specific builtin function database.  Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(BUILTIN) && !defined(RISCVV_BUILTIN)
+#define RISCVV_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m1_vl, "q8Scq8Scq8Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m1_m_vl, "q8Scq8bq8Scq8Scq8Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m1_vl, "q4Ssq4Ssq4Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m1_m_vl, "q4Ssq4bq4Ssq4Ssq4Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m1_vl, "q2Siq2Siq2Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m1_m_vl, "q2Siq2bq2Siq2Siq2Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m1_vl, "q1SWiq1SWiq1SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiq1SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m2_vl, "q16Scq16Scq16Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m2_m_vl, "q16Scq16bq16Scq16Scq16Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m2_vl, "q8Ssq8Ssq8Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m2_m_vl, "q8Ssq8bq8Ssq8Ssq8Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m2_vl, "q4Siq4Siq4Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m2_m_vl, "q4Siq4bq4Siq4Siq4Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m2_vl, "q2SWiq2SWiq2SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiq2SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m4_vl, "q32Scq32Scq32Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m4_m_vl, "q32Scq32bq32Scq32Scq32Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m4_vl, "q16Ssq16Ssq16Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m4_m_vl, "q16Ssq16bq16Ssq16Ssq16Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m4_vl, "q8Siq8Siq8Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m4_m_vl, "q8Siq8bq8Siq8Siq8Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m4_vl, "q4SWiq4SWiq4SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiq4SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m8_vl, "q64Scq64Scq64Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8m8_m_vl, "q64Scq64bq64Scq64Scq64Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m8_vl, "q32Ssq32Ssq32Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16m8_m_vl, "q32Ssq32bq32Ssq32Ssq32Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m8_vl, "q16Siq16Siq16Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32m8_m_vl, "q16Siq16bq16Siq16Siq16Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m8_vl, "q8SWiq8SWiq8SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiq8SWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf2_vl, "q4Scq4Scq4Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf2_m_vl, "q4Scq4bq4Scq4Scq4Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf2_vl, "q2Ssq2Ssq2Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf2_m_vl, "q2Ssq2bq2Ssq2Ssq2Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32mf2_vl, "q1Siq1Siq1Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i32mf2_m_vl, "q1Siq1bq1Siq1Siq1Siz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf4_vl, "q2Scq2Scq2Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf4_m_vl, "q2Scq2bq2Scq2Scq2Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf4_vl, "q1Ssq1Ssq1Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i16mf4_m_vl, "q1Ssq1bq1Ssq1Ssq1Ssz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf8_vl, "q1Scq1Scq1Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_i8mf8_m_vl, "q1Scq1bq1Scq1Scq1Scz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m1_vl, "q8Scq8ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m1_m_vl, "q8Scq8bq8Scq8ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m1_vl, "q4Ssq4SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m1_m_vl, "q4Ssq4bq4Ssq4SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m1_vl, "q2Siq2SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m1_m_vl, "q2Siq2bq2Siq2SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m1_vl, "q1SWiq1SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m1_m_vl, "q1SWiq1bq1SWiq1SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m2_vl, "q16Scq16ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m2_m_vl, "q16Scq16bq16Scq16ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m2_vl, "q8Ssq8SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m2_m_vl, "q8Ssq8bq8Ssq8SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m2_vl, "q4Siq4SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m2_m_vl, "q4Siq4bq4Siq4SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m2_vl, "q2SWiq2SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m2_m_vl, "q2SWiq2bq2SWiq2SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m4_vl, "q32Scq32ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m4_m_vl, "q32Scq32bq32Scq32ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m4_vl, "q16Ssq16SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m4_m_vl, "q16Ssq16bq16Ssq16SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m4_vl, "q8Siq8SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m4_m_vl, "q8Siq8bq8Siq8SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m4_vl, "q4SWiq4SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m4_m_vl, "q4SWiq4bq4SWiq4SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m8_vl, "q64Scq64ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8m8_m_vl, "q64Scq64bq64Scq64ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m8_vl, "q32Ssq32SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16m8_m_vl, "q32Ssq32bq32Ssq32SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m8_vl, "q16Siq16SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32m8_m_vl, "q16Siq16bq16Siq16SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m8_vl, "q8SWiq8SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i64m8_m_vl, "q8SWiq8bq8SWiq8SWiSWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf2_vl, "q4Scq4ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf2_m_vl, "q4Scq4bq4Scq4ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf2_vl, "q2Ssq2SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf2_m_vl, "q2Ssq2bq2Ssq2SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32mf2_vl, "q1Siq1SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i32mf2_m_vl, "q1Siq1bq1Siq1SiSiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf4_vl, "q2Scq2ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf4_m_vl, "q2Scq2bq2Scq2ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf4_vl, "q1Ssq1SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i16mf4_m_vl, "q1Ssq1bq1Ssq1SsSsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf8_vl, "q1Scq1ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_i8mf8_m_vl, "q1Scq1bq1Scq1ScScz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m1_vl, "q8Ucq8Ucq8Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m1_m_vl, "q8Ucq8bq8Ucq8Ucq8Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m1_vl, "q4Usq4Usq4Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m1_m_vl, "q4Usq4bq4Usq4Usq4Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m1_vl, "q2Uiq2Uiq2Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m1_m_vl, "q2Uiq2bq2Uiq2Uiq2Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m1_vl, "q1UWiq1UWiq1UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiq1UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m2_vl, "q16Ucq16Ucq16Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m2_m_vl, "q16Ucq16bq16Ucq16Ucq16Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m2_vl, "q8Usq8Usq8Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m2_m_vl, "q8Usq8bq8Usq8Usq8Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m2_vl, "q4Uiq4Uiq4Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m2_m_vl, "q4Uiq4bq4Uiq4Uiq4Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m2_vl, "q2UWiq2UWiq2UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiq2UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m4_vl, "q32Ucq32Ucq32Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m4_m_vl, "q32Ucq32bq32Ucq32Ucq32Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m4_vl, "q16Usq16Usq16Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m4_m_vl, "q16Usq16bq16Usq16Usq16Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m4_vl, "q8Uiq8Uiq8Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m4_m_vl, "q8Uiq8bq8Uiq8Uiq8Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m4_vl, "q4UWiq4UWiq4UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiq4UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m8_vl, "q64Ucq64Ucq64Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8m8_m_vl, "q64Ucq64bq64Ucq64Ucq64Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m8_vl, "q32Usq32Usq32Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16m8_m_vl, "q32Usq32bq32Usq32Usq32Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m8_vl, "q16Uiq16Uiq16Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32m8_m_vl, "q16Uiq16bq16Uiq16Uiq16Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m8_vl, "q8UWiq8UWiq8UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiq8UWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf2_vl, "q4Ucq4Ucq4Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf2_m_vl, "q4Ucq4bq4Ucq4Ucq4Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf2_vl, "q2Usq2Usq2Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf2_m_vl, "q2Usq2bq2Usq2Usq2Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32mf2_vl, "q1Uiq1Uiq1Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u32mf2_m_vl, "q1Uiq1bq1Uiq1Uiq1Uiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf4_vl, "q2Ucq2Ucq2Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf4_m_vl, "q2Ucq2bq2Ucq2Ucq2Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf4_vl, "q1Usq1Usq1Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u16mf4_m_vl, "q1Usq1bq1Usq1Usq1Usz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf8_vl, "q1Ucq1Ucq1Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vv_u8mf8_m_vl, "q1Ucq1bq1Ucq1Ucq1Ucz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m1_vl, "q8Ucq8UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m1_m_vl, "q8Ucq8bq8Ucq8UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m1_vl, "q4Usq4UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m1_m_vl, "q4Usq4bq4Usq4UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m1_vl, "q2Uiq2UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m1_m_vl, "q2Uiq2bq2Uiq2UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m1_vl, "q1UWiq1UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m1_m_vl, "q1UWiq1bq1UWiq1UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m2_vl, "q16Ucq16UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m2_m_vl, "q16Ucq16bq16Ucq16UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m2_vl, "q8Usq8UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m2_m_vl, "q8Usq8bq8Usq8UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m2_vl, "q4Uiq4UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m2_m_vl, "q4Uiq4bq4Uiq4UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m2_vl, "q2UWiq2UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m2_m_vl, "q2UWiq2bq2UWiq2UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m4_vl, "q32Ucq32UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m4_m_vl, "q32Ucq32bq32Ucq32UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m4_vl, "q16Usq16UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m4_m_vl, "q16Usq16bq16Usq16UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m4_vl, "q8Uiq8UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m4_m_vl, "q8Uiq8bq8Uiq8UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m4_vl, "q4UWiq4UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m4_m_vl, "q4UWiq4bq4UWiq4UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m8_vl, "q64Ucq64UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8m8_m_vl, "q64Ucq64bq64Ucq64UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m8_vl, "q32Usq32UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16m8_m_vl, "q32Usq32bq32Usq32UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m8_vl, "q16Uiq16UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32m8_m_vl, "q16Uiq16bq16Uiq16UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m8_vl, "q8UWiq8UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u64m8_m_vl, "q8UWiq8bq8UWiq8UWiUWiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf2_vl, "q4Ucq4UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf2_m_vl, "q4Ucq4bq4Ucq4UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf2_vl, "q2Usq2UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf2_m_vl, "q2Usq2bq2Usq2UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32mf2_vl, "q1Uiq1UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u32mf2_m_vl, "q1Uiq1bq1Uiq1UiUiz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf4_vl, "q2Ucq2UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf4_m_vl, "q2Ucq2bq2Ucq2UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf4_vl, "q1Usq1UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u16mf4_m_vl, "q1Usq1bq1Usq1UsUsz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf8_vl, "q1Ucq1UcUcz", "n")
+RISCVV_BUILTIN(__builtin_rvv_vadd_vx_u8mf8_m_vl, "q1Ucq1bq1Ucq1UcUcz", "n")
+
+#undef BUILTIN
+#undef RISCVV_BUILTIN

diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index f4458ade4675..40de2d8e5977 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -11134,4 +11134,8 @@ def err_tcb_conflicting_attributes : Error<
 def warn_tcb_enforcement_violation : Warning<
   "calling %0 is a violation of trusted computing base '%1'">,
   InGroup<DiagGroup<"tcb-enforcement">>;
+
+// RISC-V V-extension
+def err_riscvv_builtin_requires_v : Error<
+   "builtin requires 'V' extension support to be enabled">;
 } // end of sema component.

diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
index b472547012f0..0d99ffc8ffce 100644
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -124,6 +124,16 @@ namespace clang {
   enum { LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, LastTSBuiltin };
   }
 
+  /// RISCV builtins
+  namespace RISCV {
+  enum {
+    LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsRISCV.def"
+    LastTSBuiltin
+  };
+  } // namespace RISCV
+
   /// Flags to identify the types for overloaded Neon builtins.
   ///
   /// These must be kept in sync with the flags in utils/TableGen/NeonEmitter.h.

diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 3f5d8fd41117..44eaa7948a7e 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -12405,6 +12405,8 @@ class Sema final {
   bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                    CallExpr *TheCall);
   bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+  bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+                                     CallExpr *TheCall);
 
   bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
   bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);

diff --git a/clang/include/clang/module.modulemap b/clang/include/clang/module.modulemap
index b99071674984..458d91cb9d95 100644
--- a/clang/include/clang/module.modulemap
+++ b/clang/include/clang/module.modulemap
@@ -45,6 +45,7 @@ module Clang_Basic {
   textual header "Basic/BuiltinsNEON.def"
   textual header "Basic/BuiltinsNVPTX.def"
   textual header "Basic/BuiltinsPPC.def"
+  textual header "Basic/BuiltinsRISCV.def"
   textual header "Basic/BuiltinsSVE.def"
   textual header "Basic/BuiltinsSystemZ.def"
   textual header "Basic/BuiltinsWebAssembly.def"

diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 786201ea340d..f3e29af93d81 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -12,6 +12,7 @@
 
 #include "RISCV.h"
 #include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/Support/TargetParser.h"
 
@@ -197,6 +198,17 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
     Builder.defineMacro("__riscv_zvlsseg", "10000");
 }
 
+const Builtin::Info RISCVTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS)                                               \
+  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#include "clang/Basic/BuiltinsRISCV.def"
+};
+
+ArrayRef<Builtin::Info> RISCVTargetInfo::getTargetBuiltins() const {
+  return llvm::makeArrayRef(BuiltinInfo, clang::RISCV::LastTSBuiltin -
+                                             Builtin::FirstTSBuiltin);
+}
+
 /// Return true if has this feature, need to sync with handleTargetFeatures.
 bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
   bool Is64Bit = getTriple().getArch() == llvm::Triple::riscv64;

diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index 13695e638c0c..abae51e75a19 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -47,6 +47,8 @@ class RISCVTargetInfo : public TargetInfo {
   bool HasZvamo = false;
   bool HasZvlsseg = false;
 
+  static const Builtin::Info BuiltinInfo[];
+
 public:
   RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
       : TargetInfo(Triple) {
@@ -70,7 +72,7 @@ class RISCVTargetInfo : public TargetInfo {
   void getTargetDefines(const LangOptions &Opts,
                         MacroBuilder &Builder) const override;
 
-  ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+  ArrayRef<Builtin::Info> getTargetBuiltins() const override;
 
   BuiltinVaListKind getBuiltinVaListKind() const override {
     return TargetInfo::VoidPtrBuiltinVaList;

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 04289bf885b7..5409a12698c4 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -42,6 +42,7 @@
 #include "llvm/IR/IntrinsicsNVPTX.h"
 #include "llvm/IR/IntrinsicsPowerPC.h"
 #include "llvm/IR/IntrinsicsR600.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/IR/IntrinsicsS390.h"
 #include "llvm/IR/IntrinsicsWebAssembly.h"
 #include "llvm/IR/IntrinsicsX86.h"
@@ -5165,6 +5166,9 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
     return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
   case llvm::Triple::hexagon:
     return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
+  case llvm::Triple::riscv32:
+  case llvm::Triple::riscv64:
+    return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
   default:
     return nullptr;
   }
@@ -17729,3 +17733,215 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
 
   return nullptr;
 }
+
+Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
+                                             const CallExpr *E,
+                                             ReturnValueSlot ReturnValue) {
+  SmallVector<Value *, 4> Ops;
+  llvm::Type *ResultType = ConvertType(E->getType());
+
+  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+    Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+  Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+  // Required for overloaded intrinsics.
+  llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
+  switch (BuiltinID) {
+  // We could generate all the possible combinations and handling code in
+  // a file and include it here, instead of listing all the builtins plainly.
+  // Something like
+  // #include clang/Basic/RISCVVBuiltinCodeGen.inc
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8mf8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8mf8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8mf8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m1_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m8_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32mf2_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16mf4_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8mf8_vl:
+    // The order of operands is (op1, op2, vl).
+    ID = Intrinsic::riscv_vadd;
+    IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
+    break;
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i64m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i32mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i16mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_i8mf8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i64m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i32mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i16mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_i8mf8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u64m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u32mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u16mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vv_u8mf8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m1_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u64m8_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u32mf2_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u16mf4_m_vl:
+  case RISCV::BI__builtin_rvv_vadd_vx_u8mf8_m_vl:
+    ID = Intrinsic::riscv_vadd_mask;
+    // The order of operands is (mask, maskedoff, op1, op2, vl).
+    IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[4]->getType()};
+    // The order of intrinsic operands is (maskedoff, op1, op2, mask, vl).
+    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+    break;
+  }
+
+  assert(ID != Intrinsic::not_intrinsic);
+
+  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+  return Builder.CreateCall(F, Ops, "");
+}

diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 524c9c5e7077..8ef0de018a98 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -4129,6 +4129,8 @@ class CodeGenFunction : public CodeGenTypeCache {
   llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E);
   llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                    ReturnValueSlot ReturnValue);
   bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
                                llvm::AtomicOrdering &AO,
                                llvm::SyncScope::ID &SSID);

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index b41d94361d50..2c19e91c906e 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -1429,6 +1429,9 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
     return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
   case llvm::Triple::amdgcn:
     return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
+  case llvm::Triple::riscv32:
+  case llvm::Triple::riscv64:
+    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
   }
 }
 
@@ -3383,6 +3386,23 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
   return false;
 }
 
+bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
+                                         unsigned BuiltinID,
+                                         CallExpr *TheCall) {
+  switch (BuiltinID) {
+  default:
+    break;
+#define BUILTIN(ID, TYPE, ATTRS) case RISCV::BI##ID:
+#include "clang/Basic/BuiltinsRISCV.def"
+    if (!TI.hasFeature("experimental-v"))
+      return Diag(TheCall->getBeginLoc(), diag::err_riscvv_builtin_requires_v)
+             << TheCall->getSourceRange();
+    break;
+  }
+
+  return false;
+}
+
 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                            CallExpr *TheCall) {
   if (BuiltinID == SystemZ::BI__builtin_tabort) {

diff --git a/clang/test/CodeGen/RISCV/vadd.c b/clang/test/CodeGen/RISCV/vadd.c
new file mode 100644
index 000000000000..cc2e52f725a6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/vadd.c
@@ -0,0 +1,2648 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN:   -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV64-O2 %s
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v \
+// RUN:   -O2 -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV32-O2 %s
+
+#include <stddef.h>
+#include <stdint.h>
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_int8m1_t test_vadd_vv_i8m1_vl(__rvv_int8m1_t arg_0, __rvv_int8m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i8m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_int8m1_t test_vadd_vv_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, __rvv_int8m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_int16m1_t test_vadd_vv_i16m1_vl(__rvv_int16m1_t arg_0, __rvv_int16m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i16m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_int16m1_t test_vadd_vv_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, __rvv_int16m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_int32m1_t test_vadd_vv_i32m1_vl(__rvv_int32m1_t arg_0, __rvv_int32m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i32m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_int32m1_t test_vadd_vv_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, __rvv_int32m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_int64m1_t test_vadd_vv_i64m1_vl(__rvv_int64m1_t arg_0, __rvv_int64m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i64m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_int64m1_t test_vadd_vv_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, __rvv_int64m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_int8m2_t test_vadd_vv_i8m2_vl(__rvv_int8m2_t arg_0, __rvv_int8m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i8m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_int8m2_t test_vadd_vv_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, __rvv_int8m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_int16m2_t test_vadd_vv_i16m2_vl(__rvv_int16m2_t arg_0, __rvv_int16m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i16m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_int16m2_t test_vadd_vv_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, __rvv_int16m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_int32m2_t test_vadd_vv_i32m2_vl(__rvv_int32m2_t arg_0, __rvv_int32m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i32m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_int32m2_t test_vadd_vv_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, __rvv_int32m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_int64m2_t test_vadd_vv_i64m2_vl(__rvv_int64m2_t arg_0, __rvv_int64m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i64m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_int64m2_t test_vadd_vv_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, __rvv_int64m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_int8m4_t test_vadd_vv_i8m4_vl(__rvv_int8m4_t arg_0, __rvv_int8m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i8m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_int8m4_t test_vadd_vv_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, __rvv_int8m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_int16m4_t test_vadd_vv_i16m4_vl(__rvv_int16m4_t arg_0, __rvv_int16m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i16m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_int16m4_t test_vadd_vv_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, __rvv_int16m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_int32m4_t test_vadd_vv_i32m4_vl(__rvv_int32m4_t arg_0, __rvv_int32m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i32m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_int32m4_t test_vadd_vv_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, __rvv_int32m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_int64m4_t test_vadd_vv_i64m4_vl(__rvv_int64m4_t arg_0, __rvv_int64m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i64m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_int64m4_t test_vadd_vv_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, __rvv_int64m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_int8m8_t test_vadd_vv_i8m8_vl(__rvv_int8m8_t arg_0, __rvv_int8m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i8m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_int8m8_t test_vadd_vv_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, __rvv_int8m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_int16m8_t test_vadd_vv_i16m8_vl(__rvv_int16m8_t arg_0, __rvv_int16m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i16m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_int16m8_t test_vadd_vv_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, __rvv_int16m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_int32m8_t test_vadd_vv_i32m8_vl(__rvv_int32m8_t arg_0, __rvv_int32m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i32m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_int32m8_t test_vadd_vv_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, __rvv_int32m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_int64m8_t test_vadd_vv_i64m8_vl(__rvv_int64m8_t arg_0, __rvv_int64m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i64m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i64m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i64m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_int64m8_t test_vadd_vv_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, __rvv_int64m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_int8mf2_t test_vadd_vv_i8mf2_vl(__rvv_int8mf2_t arg_0, __rvv_int8mf2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i8mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_int8mf2_t test_vadd_vv_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, __rvv_int8mf2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_int16mf2_t test_vadd_vv_i16mf2_vl(__rvv_int16mf2_t arg_0, __rvv_int16mf2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i16mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_int16mf2_t test_vadd_vv_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, __rvv_int16mf2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_int32mf2_t test_vadd_vv_i32mf2_vl(__rvv_int32mf2_t arg_0, __rvv_int32mf2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i32mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i32mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i32mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_int32mf2_t test_vadd_vv_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, __rvv_int32mf2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_int8mf4_t test_vadd_vv_i8mf4_vl(__rvv_int8mf4_t arg_0, __rvv_int8mf4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i8mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_int8mf4_t test_vadd_vv_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, __rvv_int8mf4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_int16mf4_t test_vadd_vv_i16mf4_vl(__rvv_int16mf4_t arg_0, __rvv_int16mf4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i16mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i16mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i16mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_int16mf4_t test_vadd_vv_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, __rvv_int16mf4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_int8mf8_t test_vadd_vv_i8mf8_vl(__rvv_int8mf8_t arg_0, __rvv_int8mf8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_i8mf8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_i8mf8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_i8mf8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_int8mf8_t test_vadd_vv_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, __rvv_int8mf8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_int8m1_t test_vadd_vx_i8m1_vl(__rvv_int8m1_t arg_0, int8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i8m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_int8m1_t test_vadd_vx_i8m1_m_vl(__rvv_bool8_t arg_0, __rvv_int8m1_t arg_1, __rvv_int8m1_t arg_2, int8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_int16m1_t test_vadd_vx_i16m1_vl(__rvv_int16m1_t arg_0, int16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i16m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_int16m1_t test_vadd_vx_i16m1_m_vl(__rvv_bool16_t arg_0, __rvv_int16m1_t arg_1, __rvv_int16m1_t arg_2, int16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_int32m1_t test_vadd_vx_i32m1_vl(__rvv_int32m1_t arg_0, int32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i32m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_int32m1_t test_vadd_vx_i32m1_m_vl(__rvv_bool32_t arg_0, __rvv_int32m1_t arg_1, __rvv_int32m1_t arg_2, int32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_int64m1_t test_vadd_vx_i64m1_vl(__rvv_int64m1_t arg_0, int64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i64m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_int64m1_t test_vadd_vx_i64m1_m_vl(__rvv_bool64_t arg_0, __rvv_int64m1_t arg_1, __rvv_int64m1_t arg_2, int64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_int8m2_t test_vadd_vx_i8m2_vl(__rvv_int8m2_t arg_0, int8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i8m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_int8m2_t test_vadd_vx_i8m2_m_vl(__rvv_bool4_t arg_0, __rvv_int8m2_t arg_1, __rvv_int8m2_t arg_2, int8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_int16m2_t test_vadd_vx_i16m2_vl(__rvv_int16m2_t arg_0, int16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i16m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_int16m2_t test_vadd_vx_i16m2_m_vl(__rvv_bool8_t arg_0, __rvv_int16m2_t arg_1, __rvv_int16m2_t arg_2, int16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_int32m2_t test_vadd_vx_i32m2_vl(__rvv_int32m2_t arg_0, int32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i32m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_int32m2_t test_vadd_vx_i32m2_m_vl(__rvv_bool16_t arg_0, __rvv_int32m2_t arg_1, __rvv_int32m2_t arg_2, int32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_int64m2_t test_vadd_vx_i64m2_vl(__rvv_int64m2_t arg_0, int64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i64m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_int64m2_t test_vadd_vx_i64m2_m_vl(__rvv_bool32_t arg_0, __rvv_int64m2_t arg_1, __rvv_int64m2_t arg_2, int64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_int8m4_t test_vadd_vx_i8m4_vl(__rvv_int8m4_t arg_0, int8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i8m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_int8m4_t test_vadd_vx_i8m4_m_vl(__rvv_bool2_t arg_0, __rvv_int8m4_t arg_1, __rvv_int8m4_t arg_2, int8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_int16m4_t test_vadd_vx_i16m4_vl(__rvv_int16m4_t arg_0, int16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i16m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_int16m4_t test_vadd_vx_i16m4_m_vl(__rvv_bool4_t arg_0, __rvv_int16m4_t arg_1, __rvv_int16m4_t arg_2, int16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_int32m4_t test_vadd_vx_i32m4_vl(__rvv_int32m4_t arg_0, int32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i32m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_int32m4_t test_vadd_vx_i32m4_m_vl(__rvv_bool8_t arg_0, __rvv_int32m4_t arg_1, __rvv_int32m4_t arg_2, int32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_int64m4_t test_vadd_vx_i64m4_vl(__rvv_int64m4_t arg_0, int64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i64m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_int64m4_t test_vadd_vx_i64m4_m_vl(__rvv_bool16_t arg_0, __rvv_int64m4_t arg_1, __rvv_int64m4_t arg_2, int64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_int8m8_t test_vadd_vx_i8m8_vl(__rvv_int8m8_t arg_0, int8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i8m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_int8m8_t test_vadd_vx_i8m8_m_vl(__rvv_bool1_t arg_0, __rvv_int8m8_t arg_1, __rvv_int8m8_t arg_2, int8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_int16m8_t test_vadd_vx_i16m8_vl(__rvv_int16m8_t arg_0, int16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i16m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_int16m8_t test_vadd_vx_i16m8_m_vl(__rvv_bool2_t arg_0, __rvv_int16m8_t arg_1, __rvv_int16m8_t arg_2, int16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_int32m8_t test_vadd_vx_i32m8_vl(__rvv_int32m8_t arg_0, int32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i32m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_int32m8_t test_vadd_vx_i32m8_m_vl(__rvv_bool4_t arg_0, __rvv_int32m8_t arg_1, __rvv_int32m8_t arg_2, int32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_int64m8_t test_vadd_vx_i64m8_vl(__rvv_int64m8_t arg_0, int64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i64m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i64m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i64m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_int64m8_t test_vadd_vx_i64m8_m_vl(__rvv_bool8_t arg_0, __rvv_int64m8_t arg_1, __rvv_int64m8_t arg_2, int64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_int8mf2_t test_vadd_vx_i8mf2_vl(__rvv_int8mf2_t arg_0, int8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i8mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_int8mf2_t test_vadd_vx_i8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_int8mf2_t arg_1, __rvv_int8mf2_t arg_2, int8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_int16mf2_t test_vadd_vx_i16mf2_vl(__rvv_int16mf2_t arg_0, int16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i16mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_int16mf2_t test_vadd_vx_i16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_int16mf2_t arg_1, __rvv_int16mf2_t arg_2, int16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_int32mf2_t test_vadd_vx_i32mf2_vl(__rvv_int32mf2_t arg_0, int32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i32mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i32mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i32mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_int32mf2_t test_vadd_vx_i32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_int32mf2_t arg_1, __rvv_int32mf2_t arg_2, int32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_int8mf4_t test_vadd_vx_i8mf4_vl(__rvv_int8mf4_t arg_0, int8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i8mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_int8mf4_t test_vadd_vx_i8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_int8mf4_t arg_1, __rvv_int8mf4_t arg_2, int8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_int16mf4_t test_vadd_vx_i16mf4_vl(__rvv_int16mf4_t arg_0, int16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i16mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i16mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i16mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_int16mf4_t test_vadd_vx_i16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_int16mf4_t arg_1, __rvv_int16mf4_t arg_2, int16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_int8mf8_t test_vadd_vx_i8mf8_vl(__rvv_int8mf8_t arg_0, int8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_i8mf8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_i8mf8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_i8mf8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_int8mf8_t test_vadd_vx_i8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_int8mf8_t arg_1, __rvv_int8mf8_t arg_2, int8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_i8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], <vscale x 8 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_uint8m1_t test_vadd_vv_u8m1_vl(__rvv_uint8m1_t arg_0, __rvv_uint8m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u8m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], <vscale x 8 x i8> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_uint8m1_t test_vadd_vv_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, __rvv_uint8m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], <vscale x 4 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_uint16m1_t test_vadd_vv_u16m1_vl(__rvv_uint16m1_t arg_0, __rvv_uint16m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u16m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], <vscale x 4 x i16> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_uint16m1_t test_vadd_vv_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, __rvv_uint16m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], <vscale x 2 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_uint32m1_t test_vadd_vv_u32m1_vl(__rvv_uint32m1_t arg_0, __rvv_uint32m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u32m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], <vscale x 2 x i32> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_uint32m1_t test_vadd_vv_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, __rvv_uint32m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], <vscale x 1 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_uint64m1_t test_vadd_vv_u64m1_vl(__rvv_uint64m1_t arg_0, __rvv_uint64m1_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u64m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], <vscale x 1 x i64> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_uint64m1_t test_vadd_vv_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, __rvv_uint64m1_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], <vscale x 16 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_uint8m2_t test_vadd_vv_u8m2_vl(__rvv_uint8m2_t arg_0, __rvv_uint8m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u8m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], <vscale x 16 x i8> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_uint8m2_t test_vadd_vv_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, __rvv_uint8m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], <vscale x 8 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_uint16m2_t test_vadd_vv_u16m2_vl(__rvv_uint16m2_t arg_0, __rvv_uint16m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u16m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], <vscale x 8 x i16> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_uint16m2_t test_vadd_vv_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, __rvv_uint16m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], <vscale x 4 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_uint32m2_t test_vadd_vv_u32m2_vl(__rvv_uint32m2_t arg_0, __rvv_uint32m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u32m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], <vscale x 4 x i32> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_uint32m2_t test_vadd_vv_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, __rvv_uint32m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], <vscale x 2 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_uint64m2_t test_vadd_vv_u64m2_vl(__rvv_uint64m2_t arg_0, __rvv_uint64m2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u64m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], <vscale x 2 x i64> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_uint64m2_t test_vadd_vv_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, __rvv_uint64m2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], <vscale x 32 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_uint8m4_t test_vadd_vv_u8m4_vl(__rvv_uint8m4_t arg_0, __rvv_uint8m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u8m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], <vscale x 32 x i8> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_uint8m4_t test_vadd_vv_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, __rvv_uint8m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], <vscale x 16 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_uint16m4_t test_vadd_vv_u16m4_vl(__rvv_uint16m4_t arg_0, __rvv_uint16m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u16m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], <vscale x 16 x i16> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_uint16m4_t test_vadd_vv_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, __rvv_uint16m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], <vscale x 8 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_uint32m4_t test_vadd_vv_u32m4_vl(__rvv_uint32m4_t arg_0, __rvv_uint32m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u32m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], <vscale x 8 x i32> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_uint32m4_t test_vadd_vv_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, __rvv_uint32m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], <vscale x 4 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_uint64m4_t test_vadd_vv_u64m4_vl(__rvv_uint64m4_t arg_0, __rvv_uint64m4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u64m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], <vscale x 4 x i64> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_uint64m4_t test_vadd_vv_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, __rvv_uint64m4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], <vscale x 64 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_uint8m8_t test_vadd_vv_u8m8_vl(__rvv_uint8m8_t arg_0, __rvv_uint8m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u8m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], <vscale x 64 x i8> [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_uint8m8_t test_vadd_vv_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, __rvv_uint8m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], <vscale x 32 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_uint16m8_t test_vadd_vv_u16m8_vl(__rvv_uint16m8_t arg_0, __rvv_uint16m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u16m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], <vscale x 32 x i16> [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_uint16m8_t test_vadd_vv_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, __rvv_uint16m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], <vscale x 16 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_uint32m8_t test_vadd_vv_u32m8_vl(__rvv_uint32m8_t arg_0, __rvv_uint32m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u32m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], <vscale x 16 x i32> [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_uint32m8_t test_vadd_vv_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, __rvv_uint32m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], <vscale x 8 x i64> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_uint64m8_t test_vadd_vv_u64m8_vl(__rvv_uint64m8_t arg_0, __rvv_uint64m8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u64m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u64m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u64m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], <vscale x 8 x i64> [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_uint64m8_t test_vadd_vv_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, __rvv_uint64m8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], <vscale x 4 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_uint8mf2_t test_vadd_vv_u8mf2_vl(__rvv_uint8mf2_t arg_0, __rvv_uint8mf2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u8mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], <vscale x 4 x i8> [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_uint8mf2_t test_vadd_vv_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, __rvv_uint8mf2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], <vscale x 2 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_uint16mf2_t test_vadd_vv_u16mf2_vl(__rvv_uint16mf2_t arg_0, __rvv_uint16mf2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u16mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], <vscale x 2 x i16> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_uint16mf2_t test_vadd_vv_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, __rvv_uint16mf2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], <vscale x 1 x i32> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_uint32mf2_t test_vadd_vv_u32mf2_vl(__rvv_uint32mf2_t arg_0, __rvv_uint32mf2_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u32mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u32mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u32mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], <vscale x 1 x i32> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_uint32mf2_t test_vadd_vv_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, __rvv_uint32mf2_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], <vscale x 2 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_uint8mf4_t test_vadd_vv_u8mf4_vl(__rvv_uint8mf4_t arg_0, __rvv_uint8mf4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u8mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], <vscale x 2 x i8> [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_uint8mf4_t test_vadd_vv_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, __rvv_uint8mf4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], <vscale x 1 x i16> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_uint16mf4_t test_vadd_vv_u16mf4_vl(__rvv_uint16mf4_t arg_0, __rvv_uint16mf4_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u16mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u16mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u16mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], <vscale x 1 x i16> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_uint16mf4_t test_vadd_vv_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, __rvv_uint16mf4_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], <vscale x 1 x i8> [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_uint8mf8_t test_vadd_vv_u8mf8_vl(__rvv_uint8mf8_t arg_0, __rvv_uint8mf8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vv_u8mf8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vv_u8mf8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vv_u8mf8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], <vscale x 1 x i8> [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_uint8mf8_t test_vadd_vv_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, __rvv_uint8mf8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vv_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
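+// In the masked (_m_vl) tests in this file, the builtin takes the mask as
+// its first argument and the vector length last; the CHECK lines show the
+// lowered @llvm.riscv.vadd.mask.* call receiving the mask as its fourth
+// operand and the vector length (i64 on RV64, i32 on RV32) as its last.
+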
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_uint8m1_t test_vadd_vx_u8m1_vl(__rvv_uint8m1_t arg_0, uint8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u8m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ARG_1:%.*]], <vscale x 8 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+__rvv_uint8m1_t test_vadd_vx_u8m1_m_vl(__rvv_bool8_t arg_0, __rvv_uint8m1_t arg_1, __rvv_uint8m1_t arg_2, uint8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u8m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_uint16m1_t test_vadd_vx_u16m1_vl(__rvv_uint16m1_t arg_0, uint16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u16m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ARG_1:%.*]], <vscale x 4 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+__rvv_uint16m1_t test_vadd_vx_u16m1_m_vl(__rvv_bool16_t arg_0, __rvv_uint16m1_t arg_1, __rvv_uint16m1_t arg_2, uint16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u16m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_uint32m1_t test_vadd_vx_u32m1_vl(__rvv_uint32m1_t arg_0, uint32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u32m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ARG_1:%.*]], <vscale x 2 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+__rvv_uint32m1_t test_vadd_vx_u32m1_m_vl(__rvv_bool32_t arg_0, __rvv_uint32m1_t arg_1, __rvv_uint32m1_t arg_2, uint32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u32m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_uint64m1_t test_vadd_vx_u64m1_vl(__rvv_uint64m1_t arg_0, uint64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u64m1_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m1_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m1_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ARG_1:%.*]], <vscale x 1 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+__rvv_uint64m1_t test_vadd_vx_u64m1_m_vl(__rvv_bool64_t arg_0, __rvv_uint64m1_t arg_1, __rvv_uint64m1_t arg_2, uint64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u64m1_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_uint8m2_t test_vadd_vx_u8m2_vl(__rvv_uint8m2_t arg_0, uint8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u8m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ARG_1:%.*]], <vscale x 16 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+__rvv_uint8m2_t test_vadd_vx_u8m2_m_vl(__rvv_bool4_t arg_0, __rvv_uint8m2_t arg_1, __rvv_uint8m2_t arg_2, uint8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u8m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_uint16m2_t test_vadd_vx_u16m2_vl(__rvv_uint16m2_t arg_0, uint16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u16m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ARG_1:%.*]], <vscale x 8 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+__rvv_uint16m2_t test_vadd_vx_u16m2_m_vl(__rvv_bool8_t arg_0, __rvv_uint16m2_t arg_1, __rvv_uint16m2_t arg_2, uint16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u16m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_uint32m2_t test_vadd_vx_u32m2_vl(__rvv_uint32m2_t arg_0, uint32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u32m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ARG_1:%.*]], <vscale x 4 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+__rvv_uint32m2_t test_vadd_vx_u32m2_m_vl(__rvv_bool16_t arg_0, __rvv_uint32m2_t arg_1, __rvv_uint32m2_t arg_2, uint32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u32m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_uint64m2_t test_vadd_vx_u64m2_vl(__rvv_uint64m2_t arg_0, uint64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u64m2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.riscv.vadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ARG_1:%.*]], <vscale x 2 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+__rvv_uint64m2_t test_vadd_vx_u64m2_m_vl(__rvv_bool32_t arg_0, __rvv_uint64m2_t arg_1, __rvv_uint64m2_t arg_2, uint64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u64m2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_uint8m4_t test_vadd_vx_u8m4_vl(__rvv_uint8m4_t arg_0, uint8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u8m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ARG_1:%.*]], <vscale x 32 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+__rvv_uint8m4_t test_vadd_vx_u8m4_m_vl(__rvv_bool2_t arg_0, __rvv_uint8m4_t arg_1, __rvv_uint8m4_t arg_2, uint8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u8m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_uint16m4_t test_vadd_vx_u16m4_vl(__rvv_uint16m4_t arg_0, uint16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u16m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ARG_1:%.*]], <vscale x 16 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+__rvv_uint16m4_t test_vadd_vx_u16m4_m_vl(__rvv_bool4_t arg_0, __rvv_uint16m4_t arg_1, __rvv_uint16m4_t arg_2, uint16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u16m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_uint32m4_t test_vadd_vx_u32m4_vl(__rvv_uint32m4_t arg_0, uint32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u32m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ARG_1:%.*]], <vscale x 8 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+__rvv_uint32m4_t test_vadd_vx_u32m4_m_vl(__rvv_bool8_t arg_0, __rvv_uint32m4_t arg_1, __rvv_uint32m4_t arg_2, uint32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u32m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_uint64m4_t test_vadd_vx_u64m4_vl(__rvv_uint64m4_t arg_0, uint64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u64m4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i64> @llvm.riscv.vadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ARG_1:%.*]], <vscale x 4 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+__rvv_uint64m4_t test_vadd_vx_u64m4_m_vl(__rvv_bool16_t arg_0, __rvv_uint64m4_t arg_1, __rvv_uint64m4_t arg_2, uint64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u64m4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_uint8m8_t test_vadd_vx_u8m8_vl(__rvv_uint8m8_t arg_0, uint8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u8m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ARG_1:%.*]], <vscale x 64 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 64 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+__rvv_uint8m8_t test_vadd_vx_u8m8_m_vl(__rvv_bool1_t arg_0, __rvv_uint8m8_t arg_1, __rvv_uint8m8_t arg_2, uint8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u8m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_uint16m8_t test_vadd_vx_u16m8_vl(__rvv_uint16m8_t arg_0, uint16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u16m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ARG_1:%.*]], <vscale x 32 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 32 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+__rvv_uint16m8_t test_vadd_vx_u16m8_m_vl(__rvv_bool2_t arg_0, __rvv_uint16m8_t arg_1, __rvv_uint16m8_t arg_2, uint16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u16m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_uint32m8_t test_vadd_vx_u32m8_vl(__rvv_uint32m8_t arg_0, uint32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u32m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ARG_1:%.*]], <vscale x 16 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 16 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+__rvv_uint32m8_t test_vadd_vx_u32m8_m_vl(__rvv_bool4_t arg_0, __rvv_uint32m8_t arg_1, __rvv_uint32m8_t arg_2, uint32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u32m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_0:%.*]], i64 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_uint64m8_t test_vadd_vx_u64m8_vl(__rvv_uint64m8_t arg_0, uint64_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u64m8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u64m8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u64m8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i64> @llvm.riscv.vadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ARG_1:%.*]], <vscale x 8 x i64> [[ARG_2:%.*]], i64 [[ARG_3:%.*]], <vscale x 8 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+__rvv_uint64m8_t test_vadd_vx_u64m8_m_vl(__rvv_bool8_t arg_0, __rvv_uint64m8_t arg_1, __rvv_uint64m8_t arg_2, uint64_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u64m8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_uint8mf2_t test_vadd_vx_u8mf2_vl(__rvv_uint8mf2_t arg_0, uint8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u8mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ARG_1:%.*]], <vscale x 4 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 4 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+__rvv_uint8mf2_t test_vadd_vx_u8mf2_m_vl(__rvv_bool16_t arg_0, __rvv_uint8mf2_t arg_1, __rvv_uint8mf2_t arg_2, uint8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u8mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_uint16mf2_t test_vadd_vx_u16mf2_vl(__rvv_uint16mf2_t arg_0, uint16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u16mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ARG_1:%.*]], <vscale x 2 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+__rvv_uint16mf2_t test_vadd_vx_u16mf2_m_vl(__rvv_bool32_t arg_0, __rvv_uint16mf2_t arg_1, __rvv_uint16mf2_t arg_2, uint16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u16mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_0:%.*]], i32 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_uint32mf2_t test_vadd_vx_u32mf2_vl(__rvv_uint32mf2_t arg_0, uint32_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u32mf2_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u32mf2_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u32mf2_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ARG_1:%.*]], <vscale x 1 x i32> [[ARG_2:%.*]], i32 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+__rvv_uint32mf2_t test_vadd_vx_u32mf2_m_vl(__rvv_bool64_t arg_0, __rvv_uint32mf2_t arg_1, __rvv_uint32mf2_t arg_2, uint32_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u32mf2_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_uint8mf4_t test_vadd_vx_u8mf4_vl(__rvv_uint8mf4_t arg_0, uint8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u8mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ARG_1:%.*]], <vscale x 2 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 2 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+__rvv_uint8mf4_t test_vadd_vx_u8mf4_m_vl(__rvv_bool32_t arg_0, __rvv_uint8mf4_t arg_1, __rvv_uint8mf4_t arg_2, uint8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u8mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_0:%.*]], i16 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_uint16mf4_t test_vadd_vx_u16mf4_vl(__rvv_uint16mf4_t arg_0, uint16_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u16mf4_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u16mf4_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u16mf4_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ARG_1:%.*]], <vscale x 1 x i16> [[ARG_2:%.*]], i16 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+__rvv_uint16mf4_t test_vadd_vx_u16mf4_m_vl(__rvv_bool64_t arg_0, __rvv_uint16mf4_t arg_1, __rvv_uint16mf4_t arg_2, uint16_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u16mf4_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i64 [[ARG_2:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_0:%.*]], i8 [[ARG_1:%.*]], i32 [[ARG_2:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_uint8mf8_t test_vadd_vx_u8mf8_vl(__rvv_uint8mf8_t arg_0, uint8_t arg_1, size_t arg_2)
+{
+    return __builtin_rvv_vadd_vx_u8mf8_vl(arg_0, arg_1, arg_2);
+}
+
+// CHECK-RV64-O2-LABEL: @test_vadd_vx_u8mf8_m_vl(
+// CHECK-RV64-O2-NEXT:  entry:
+// CHECK-RV64-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i64 [[ARG_4:%.*]])
+// CHECK-RV64-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV32-O2-LABEL: @test_vadd_vx_u8mf8_m_vl(
+// CHECK-RV32-O2-NEXT:  entry:
+// CHECK-RV32-O2-NEXT:    [[TMP0:%.*]] = tail call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ARG_1:%.*]], <vscale x 1 x i8> [[ARG_2:%.*]], i8 [[ARG_3:%.*]], <vscale x 1 x i1> [[ARG_0:%.*]], i32 [[ARG_4:%.*]])
+// CHECK-RV32-O2-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+__rvv_uint8mf8_t test_vadd_vx_u8mf8_m_vl(__rvv_bool64_t arg_0, __rvv_uint8mf8_t arg_1, __rvv_uint8mf8_t arg_2, uint8_t arg_3, size_t arg_4)
+{
+    return __builtin_rvv_vadd_vx_u8mf8_m_vl(arg_0, arg_1, arg_2, arg_3, arg_4);
+}


        

