[clang] 6c6abb1 - [RISCV] Remove Zvamo C intrinsics and builtins.

Craig Topper via cfe-commits cfe-commits at lists.llvm.org
Thu Oct 21 16:10:11 PDT 2021


Author: Craig Topper
Date: 2021-10-21T16:10:05-07:00
New Revision: 6c6abb1ded8d91760e8a3b500f959fee5b767406

URL: https://github.com/llvm/llvm-project/commit/6c6abb1ded8d91760e8a3b500f959fee5b767406
DIFF: https://github.com/llvm/llvm-project/commit/6c6abb1ded8d91760e8a3b500f959fee5b767406.diff

LOG: [RISCV] Remove Zvamo C intrinsics and builtins.

Zvamo is not part of the 1.0 V spec. Remove the intrinsics
for now. This helps reduce clang binary size and lit test time.
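For reference, this is the shape of the C builtins being removed, adapted
from the deleted tests (with -target-feature +experimental-zvamo code like
this previously compiled; after this change it is rejected):

  #include <riscv_vector.h>

  // Indexed vector AMO add: for each active element, atomically adds
  // value[i] to the memory at base offset by bindex[i] and returns the
  // original memory values.
  vint32m1_t amo_add(int32_t *base, vuint8mf4_t bindex,
                     vint32m1_t value, size_t vl) {
    return vamoaddei8(base, bindex, value, vl);
  }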

Reviewed By: HsiangKai

Differential Revision: https://reviews.llvm.org/D111692

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/utils/TableGen/RISCVVEmitter.cpp

Removed: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 607f277ebb4ec..6c812a88edd5f 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -215,7 +215,7 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
   // an automatic definition in header is emitted.
   string HeaderCode = "";
 
-  // Sub extension of vector spec. Currently only support Zvamo or Zvlsseg.
+  // Sub extension of vector spec. Currently only support Zvlsseg.
   string RequiredExtension = "";
 
   // Number of fields for Zvlsseg.
@@ -1257,36 +1257,6 @@ multiclass RVVIndexedSegStore<string op> {
   }
 }
 
-multiclass RVVAMOBuiltinSet<bit has_signed = false, bit has_unsigned = false,
-                            bit has_fp = false> {
-  defvar type_list = !if(has_fp, ["i","l","f","d"], ["i","l"]);
-  foreach type = type_list in
-    foreach eew_list = EEWList in {
-      defvar eew = eew_list[0];
-      defvar eew_index = eew_list[1];
-      let Name = NAME # "ei" # eew # "_" # "v",
-          IRName = NAME,
-          IRNameMask = NAME # "_mask",
-          HasMaskedOffOperand = false,
-          HasPolicy = false,
-          ManualCodegen = [{
-            // base, bindex, value, vl
-            IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
-            Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
-          }],
-          ManualCodegenMask = [{
-            std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-            IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType()};
-            Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
-          }] in {
-        if has_signed then
-          def : RVVBuiltin<"v", "vPe" # eew_index # "Uvv", type>;
-        if !and(!not(IsFloat<type>.val), has_unsigned) then
-          def : RVVBuiltin<"Uv", "UvPUe" # eew_index # "UvUv", type>;
-      }
-    }
-}
-
 multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
   let Name = NAME,
       IRName = IR,
@@ -1591,19 +1561,6 @@ defm : RVVIndexedSegStore<"vsuxseg">;
 defm : RVVIndexedSegStore<"vsoxseg">;
 }
 
-// 8. Vector AMO Operations
-let RequiredExtension = "Zvamo" in {
-defm vamoswap : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true, /* hasFP */ true>;
-defm vamoadd : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamoxor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamoand : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamoor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamomin : RVVAMOBuiltinSet< /* hasSigned */ true>;
-defm vamomax : RVVAMOBuiltinSet< /* hasSigned */ true>;
-defm vamominu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
-defm vamomaxu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
-}
-
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
 defm vadd : RVVIntBinBuiltinSet;

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
deleted file mode 100644
index 87420436ee29d..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei64(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
deleted file mode 100644
index 281c13c3567a2..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei64(mask, base, bindex, value, vl);
-}
-

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
deleted file mode 100644
index 07f2894e814c5..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei64(mask, base, bindex, value, vl);
-}
-

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
deleted file mode 100644
index 352e81d97642b..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei64(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
deleted file mode 100644
index 43de42cbceb4f..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei64(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
deleted file mode 100644
index d7646cd4c24dd..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
+++ /dev/null
@@ -1,2421 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN:   -target-feature +experimental-zfh -target-feature +experimental-zvamo -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex,
-                                      vint32mf2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei8_v_i32m1(int32_t *base, vuint8mf4_t bindex,
-                                    vint32m1_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei8_v_i32m2(int32_t *base, vuint8mf2_t bindex,
-                                    vint32m2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei8_v_i32m4(int32_t *base, vuint8m1_t bindex,
-                                    vint32m4_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei8_v_i32m8(int32_t *base, vuint8m2_t bindex,
-                                    vint32m8_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
-                                       vint32mf2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei16_v_i32m1(int32_t *base, vuint16mf2_t bindex,
-                                     vint32m1_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei16_v_i32m2(int32_t *base, vuint16m1_t bindex,
-                                     vint32m2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei16_v_i32m4(int32_t *base, vuint16m2_t bindex,
-                                     vint32m4_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei16_v_i32m8(int32_t *base, vuint16m4_t bindex,
-                                     vint32m8_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
-                                       vint32mf2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei32_v_i32m1(int32_t *base, vuint32m1_t bindex,
-                                     vint32m1_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei32_v_i32m2(int32_t *base, vuint32m2_t bindex,
-                                     vint32m2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei32_v_i32m4(int32_t *base, vuint32m4_t bindex,
-                                     vint32m4_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei32_v_i32m8(int32_t *base, vuint32m8_t bindex,
-                                     vint32m8_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
-                                       vint32mf2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei64_v_i32m1(int32_t *base, vuint64m2_t bindex,
-                                     vint32m1_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei64_v_i32m2(int32_t *base, vuint64m4_t bindex,
-                                     vint32m2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei64_v_i32m4(int32_t *base, vuint64m8_t bindex,
-                                     vint32m4_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei8_v_i64m1(int64_t *base, vuint8mf8_t bindex,
-                                    vint64m1_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei8_v_i64m2(int64_t *base, vuint8mf4_t bindex,
-                                    vint64m2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei8_v_i64m4(int64_t *base, vuint8mf2_t bindex,
-                                    vint64m4_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei8_v_i64m8(int64_t *base, vuint8m1_t bindex,
-                                    vint64m8_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei16_v_i64m1(int64_t *base, vuint16mf4_t bindex,
-                                     vint64m1_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei16_v_i64m2(int64_t *base, vuint16mf2_t bindex,
-                                     vint64m2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei16_v_i64m4(int64_t *base, vuint16m1_t bindex,
-                                     vint64m4_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei16_v_i64m8(int64_t *base, vuint16m2_t bindex,
-                                     vint64m8_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei32_v_i64m1(int64_t *base, vuint32mf2_t bindex,
-                                     vint64m1_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei32_v_i64m2(int64_t *base, vuint32m1_t bindex,
-                                     vint64m2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei32_v_i64m4(int64_t *base, vuint32m2_t bindex,
-                                     vint64m4_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei32_v_i64m8(int64_t *base, vuint32m4_t bindex,
-                                     vint64m8_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei64_v_i64m1(int64_t *base, vuint64m1_t bindex,
-                                     vint64m1_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei64_v_i64m2(int64_t *base, vuint64m2_t bindex,
-                                     vint64m2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei64_v_i64m4(int64_t *base, vuint64m4_t bindex,
-                                     vint64m4_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei64_v_i64m8(int64_t *base, vuint64m8_t bindex,
-                                     vint64m8_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
-                                       vuint32mf2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex,
-                                     vuint32m1_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex,
-                                     vuint32m2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei8_v_u32m4(uint32_t *base, vuint8m1_t bindex,
-                                     vuint32m4_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei8_v_u32m8(uint32_t *base, vuint8m2_t bindex,
-                                     vuint32m8_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
-                                        vuint32mf2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
-                                      vuint32m1_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
-                                      vuint32m2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
-                                      vuint32m4_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
-                                      vuint32m8_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
-                                        vuint32mf2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
-                                      vuint32m1_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
-                                      vuint32m2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
-                                      vuint32m4_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
-                                      vuint32m8_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
-                                        vuint32mf2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
-                                      vuint32m1_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
-                                      vuint32m2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
-                                      vuint32m4_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex,
-                                     vuint64m1_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex,
-                                     vuint64m2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex,
-                                     vuint64m4_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei8_v_u64m8(uint64_t *base, vuint8m1_t bindex,
-                                     vuint64m8_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
-                                      vuint64m1_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
-                                      vuint64m2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
-                                      vuint64m4_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
-                                      vuint64m8_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
-                                      vuint64m1_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
-                                      vuint64m2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
-                                      vuint64m4_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
-                                      vuint64m8_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
-                                      vuint64m1_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
-                                      vuint64m2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
-                                      vuint64m4_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
-                                      vuint64m8_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei8_v_f32mf2(float *base, vuint8mf8_t bindex,
-                                        vfloat32mf2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei8_v_f32m1(float *base, vuint8mf4_t bindex,
-                                      vfloat32m1_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei8_v_f32m2(float *base, vuint8mf2_t bindex,
-                                      vfloat32m2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei8_v_f32m4(float *base, vuint8m1_t bindex,
-                                      vfloat32m4_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei8_v_f32m8(float *base, vuint8m2_t bindex,
-                                      vfloat32m8_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei16_v_f32mf2(float *base, vuint16mf4_t bindex,
-                                         vfloat32mf2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei16_v_f32m1(float *base, vuint16mf2_t bindex,
-                                       vfloat32m1_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei16_v_f32m2(float *base, vuint16m1_t bindex,
-                                       vfloat32m2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei16_v_f32m4(float *base, vuint16m2_t bindex,
-                                       vfloat32m4_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei16_v_f32m8(float *base, vuint16m4_t bindex,
-                                       vfloat32m8_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei32_v_f32mf2(float *base, vuint32mf2_t bindex,
-                                         vfloat32mf2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei32_v_f32m1(float *base, vuint32m1_t bindex,
-                                       vfloat32m1_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei32_v_f32m2(float *base, vuint32m2_t bindex,
-                                       vfloat32m2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei32_v_f32m4(float *base, vuint32m4_t bindex,
-                                       vfloat32m4_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei32_v_f32m8(float *base, vuint32m8_t bindex,
-                                       vfloat32m8_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei64_v_f32mf2(float *base, vuint64m1_t bindex,
-                                         vfloat32mf2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei64_v_f32m1(float *base, vuint64m2_t bindex,
-                                       vfloat32m1_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei64_v_f32m2(float *base, vuint64m4_t bindex,
-                                       vfloat32m2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei64_v_f32m4(float *base, vuint64m8_t bindex,
-                                       vfloat32m4_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei8_v_f64m1(double *base, vuint8mf8_t bindex,
-                                      vfloat64m1_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei8_v_f64m2(double *base, vuint8mf4_t bindex,
-                                      vfloat64m2_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei8_v_f64m4(double *base, vuint8mf2_t bindex,
-                                      vfloat64m4_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei8_v_f64m8(double *base, vuint8m1_t bindex,
-                                      vfloat64m8_t value, size_t vl) {
-  return vamoswapei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei16_v_f64m1(double *base, vuint16mf4_t bindex,
-                                       vfloat64m1_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei16_v_f64m2(double *base, vuint16mf2_t bindex,
-                                       vfloat64m2_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei16_v_f64m4(double *base, vuint16m1_t bindex,
-                                       vfloat64m4_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei16_v_f64m8(double *base, vuint16m2_t bindex,
-                                       vfloat64m8_t value, size_t vl) {
-  return vamoswapei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei32_v_f64m1(double *base, vuint32mf2_t bindex,
-                                       vfloat64m1_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei32_v_f64m2(double *base, vuint32m1_t bindex,
-                                       vfloat64m2_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei32_v_f64m4(double *base, vuint32m2_t bindex,
-                                       vfloat64m4_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei32_v_f64m8(double *base, vuint32m4_t bindex,
-                                       vfloat64m8_t value, size_t vl) {
-  return vamoswapei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei64_v_f64m1(double *base, vuint64m1_t bindex,
-                                       vfloat64m1_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei64_v_f64m2(double *base, vuint64m2_t bindex,
-                                       vfloat64m2_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei64_v_f64m4(double *base, vuint64m4_t bindex,
-                                       vfloat64m4_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei64_v_f64m8(double *base, vuint64m8_t bindex,
-                                       vfloat64m8_t value, size_t vl) {
-  return vamoswapei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base,
-                                        vuint8mf8_t bindex, vint32mf2_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base,
-                                      vuint8mf4_t bindex, vint32m1_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base,
-                                      vuint8mf2_t bindex, vint32m2_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base,
-                                      vuint8m1_t bindex, vint32m4_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base,
-                                      vuint8m2_t bindex, vint32m8_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
-                                         vuint16mf4_t bindex, vint32mf2_t value,
-                                         size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base,
-                                       vuint16mf2_t bindex, vint32m1_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, int32_t *base,
-                                       vuint16m1_t bindex, vint32m2_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base,
-                                       vuint16m2_t bindex, vint32m4_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base,
-                                       vuint16m4_t bindex, vint32m8_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
-                                         vuint32mf2_t bindex, vint32mf2_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base,
-                                       vuint32m1_t bindex, vint32m1_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base,
-                                       vuint32m2_t bindex, vint32m2_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base,
-                                       vuint32m4_t bindex, vint32m4_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base,
-                                       vuint32m8_t bindex, vint32m8_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base,
-                                         vuint64m1_t bindex, vint32mf2_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base,
-                                       vuint64m2_t bindex, vint32m1_t value,
-                                       size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base,
-                                       vuint64m4_t bindex, vint32m2_t value,
-                                       size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base,
-                                       vuint64m8_t bindex, vint32m4_t value,
-                                       size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base,
-                                      vuint8mf8_t bindex, vint64m1_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base,
-                                      vuint8mf4_t bindex, vint64m2_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base,
-                                      vuint8mf2_t bindex, vint64m4_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base,
-                                      vuint8m1_t bindex, vint64m8_t value,
-                                      size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t mask, int64_t *base,
-                                       vuint16mf4_t bindex, vint64m1_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base,
-                                       vuint16mf2_t bindex, vint64m2_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base,
-                                       vuint16m1_t bindex, vint64m4_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base,
-                                       vuint16m2_t bindex, vint64m8_t value,
-                                       size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base,
-                                       vuint32mf2_t bindex, vint64m1_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base,
-                                       vuint32m1_t bindex, vint64m2_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base,
-                                       vuint32m2_t bindex, vint64m4_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base,
-                                       vuint32m4_t bindex, vint64m8_t value,
-                                       size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base,
-                                       vuint64m1_t bindex, vint64m1_t value,
-                                       size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base,
-                                       vuint64m2_t bindex, vint64m2_t value,
-                                       size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base,
-                                       vuint64m4_t bindex, vint64m4_t value,
-                                       size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base,
-                                       vuint64m8_t bindex, vint64m8_t value,
-                                       size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base,
-                                         vuint8mf8_t bindex, vuint32mf2_t value,
-                                         size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base,
-                                       vuint8mf4_t bindex, vuint32m1_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base,
-                                       vuint8mf2_t bindex, vuint32m2_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base,
-                                       vuint8m1_t bindex, vuint32m4_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base,
-                                       vuint8m2_t bindex, vuint32m8_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
-                                          vuint16mf4_t bindex,
-                                          vuint32mf2_t value, size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
-                                        vuint16mf2_t bindex, vuint32m1_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base,
-                                        vuint16m1_t bindex, vuint32m2_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base,
-                                        vuint16m2_t bindex, vuint32m4_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base,
-                                        vuint16m4_t bindex, vuint32m8_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
-                                          vuint32mf2_t bindex,
-                                          vuint32mf2_t value, size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base,
-                                        vuint32m1_t bindex, vuint32m1_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base,
-                                        vuint32m2_t bindex, vuint32m2_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base,
-                                        vuint32m4_t bindex, vuint32m4_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base,
-                                        vuint32m8_t bindex, vuint32m8_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
-                                          vuint64m1_t bindex,
-                                          vuint32mf2_t value, size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base,
-                                        vuint64m2_t bindex, vuint32m1_t value,
-                                        size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base,
-                                        vuint64m4_t bindex, vuint32m2_t value,
-                                        size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base,
-                                        vuint64m8_t bindex, vuint32m4_t value,
-                                        size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base,
-                                       vuint8mf8_t bindex, vuint64m1_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base,
-                                       vuint8mf4_t bindex, vuint64m2_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base,
-                                       vuint8mf2_t bindex, vuint64m4_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base,
-                                       vuint8m1_t bindex, vuint64m8_t value,
-                                       size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
-                                        vuint16mf4_t bindex, vuint64m1_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
-                                        vuint16mf2_t bindex, vuint64m2_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base,
-                                        vuint16m1_t bindex, vuint64m4_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base,
-                                        vuint16m2_t bindex, vuint64m8_t value,
-                                        size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
-                                        vuint32mf2_t bindex, vuint64m1_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base,
-                                        vuint32m1_t bindex, vuint64m2_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base,
-                                        vuint32m2_t bindex, vuint64m4_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base,
-                                        vuint32m4_t bindex, vuint64m8_t value,
-                                        size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base,
-                                        vuint64m1_t bindex, vuint64m1_t value,
-                                        size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base,
-                                        vuint64m2_t bindex, vuint64m2_t value,
-                                        size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, uint64_t *base,
-                                        vuint64m4_t bindex, vuint64m4_t value,
-                                        size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base,
-                                        vuint64m8_t bindex, vuint64m8_t value,
-                                        size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base,
-                                          vuint8mf8_t bindex,
-                                          vfloat32mf2_t value, size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base,
-                                        vuint8mf4_t bindex, vfloat32m1_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base,
-                                        vuint8mf2_t bindex, vfloat32m2_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base,
-                                        vuint8m1_t bindex, vfloat32m4_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base,
-                                        vuint8m2_t bindex, vfloat32m8_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base,
-                                           vuint16mf4_t bindex,
-                                           vfloat32mf2_t value, size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base,
-                                         vuint16mf2_t bindex,
-                                         vfloat32m1_t value, size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base,
-                                         vuint16m1_t bindex, vfloat32m2_t value,
-                                         size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base,
-                                         vuint16m2_t bindex, vfloat32m4_t value,
-                                         size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base,
-                                         vuint16m4_t bindex, vfloat32m8_t value,
-                                         size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base,
-                                           vuint32mf2_t bindex,
-                                           vfloat32mf2_t value, size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base,
-                                         vuint32m1_t bindex, vfloat32m1_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base,
-                                         vuint32m2_t bindex, vfloat32m2_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base,
-                                         vuint32m4_t bindex, vfloat32m4_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base,
-                                         vuint32m8_t bindex, vfloat32m8_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base,
-                                           vuint64m1_t bindex,
-                                           vfloat32mf2_t value, size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base,
-                                         vuint64m2_t bindex, vfloat32m1_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base,
-                                         vuint64m4_t bindex, vfloat32m2_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base,
-                                         vuint64m8_t bindex, vfloat32m4_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base,
-                                        vuint8mf8_t bindex, vfloat64m1_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base,
-                                        vuint8mf4_t bindex, vfloat64m2_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base,
-                                        vuint8mf2_t bindex, vfloat64m4_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base,
-                                        vuint8m1_t bindex, vfloat64m8_t value,
-                                        size_t vl) {
-  return vamoswapei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base,
-                                         vuint16mf4_t bindex,
-                                         vfloat64m1_t value, size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base,
-                                         vuint16mf2_t bindex,
-                                         vfloat64m2_t value, size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base,
-                                         vuint16m1_t bindex, vfloat64m4_t value,
-                                         size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base,
-                                         vuint16m2_t bindex, vfloat64m8_t value,
-                                         size_t vl) {
-  return vamoswapei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base,
-                                         vuint32mf2_t bindex,
-                                         vfloat64m1_t value, size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base,
-                                         vuint32m1_t bindex, vfloat64m2_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base,
-                                         vuint32m2_t bindex, vfloat64m4_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base,
-                                         vuint32m4_t bindex, vfloat64m8_t value,
-                                         size_t vl) {
-  return vamoswapei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base,
-                                         vuint64m1_t bindex, vfloat64m1_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base,
-                                         vuint64m2_t bindex, vfloat64m2_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base,
-                                         vuint64m4_t bindex, vfloat64m4_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei64_v_f64m8_m(vbool8_t mask, double *base,
-                                         vuint64m8_t bindex, vfloat64m8_t value,
-                                         size_t vl) {
-  return vamoswapei64(mask, base, bindex, value, vl);
-}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
deleted file mode 100644
index 9e162a2de5c22..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei16(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei32(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei64(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei8(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei16(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei32(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei64(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
deleted file mode 100644
index c90fac2ecb4b8..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoadd.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei8_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei8_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei8_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei8_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei8_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei16_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei16_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei16_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei16_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei16_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei32_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei32_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei32_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei32_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei32_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei64_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei64_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei64_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei64_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei8_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei8_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei8_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei8_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei16_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei16_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei16_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei16_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei32_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei32_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei32_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei32_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei64_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei64_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei64_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei64_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei8_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei8_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei8_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei8_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei8_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei16_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei16_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei16_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei16_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei16_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei32_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei32_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei32_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei32_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei32_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei64_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei64_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei64_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei64_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei8_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei8_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei8_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei8_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei16_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei16_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei16_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei16_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei32_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei32_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei32_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei32_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei64_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei64_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei64_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei64_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei8_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei8_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei8_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei8_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei8_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei16_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei16_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei16_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei16_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei16_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei32_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei32_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei32_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei32_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoaddei32_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoaddei64_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoaddei64_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoaddei64_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoaddei64_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei8_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei8_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei8_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei8_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei16_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei16_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei16_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei16_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei32_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei32_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei32_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei32_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoaddei64_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoaddei64_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoaddei64_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoaddei64_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei8_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei8_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei8_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei8_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei8_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei16_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei16_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei16_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei16_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei16_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei32_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei32_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei32_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei32_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoaddei32_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoaddei64_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoaddei64_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoaddei64_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoaddei64_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei8_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei8_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei8_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei8_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei16_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei16_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei16_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei16_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei32_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei32_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei32_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei32_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoaddei64_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoaddei64_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoaddei64_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoaddei64_v_u64m8_m(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
deleted file mode 100644
index abc9083c86f18..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoand.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei8_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei8_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei8_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei8_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei8_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei16_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei16_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei16_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei16_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei16_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei32_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei32_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei32_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei32_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei32_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei64_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei64_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei64_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei64_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei8_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei8_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei8_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei8_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei16_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei16_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei16_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei16_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei32_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei32_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei32_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei32_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei64_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei64_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei64_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei64_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei8_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei8_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei8_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei8_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei8_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei16_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei16_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei16_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei16_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei16_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei32_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei32_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei32_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei32_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei32_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei64_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei64_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei64_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei64_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei8_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei8_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei8_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei8_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei16_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei16_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei16_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei16_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei32_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei32_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei32_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei32_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei64_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei64_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei64_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei64_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei8_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei8_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei8_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei8_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei8_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei16_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei16_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei16_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei16_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei16_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei32_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei32_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei32_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei32_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoandei32_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoandei64_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoandei64_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoandei64_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoandei64_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei8_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei8_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei8_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei8_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei16_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei16_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei16_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei16_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei32_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei32_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei32_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei32_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoandei64_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoandei64_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoandei64_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoandei64_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei8_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei8_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei8_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei8_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei8_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei16_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei16_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei16_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei16_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei16_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei32_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei32_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei32_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei32_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoandei32_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoandei64_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoandei64_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoandei64_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoandei64_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei8_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei8_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei8_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei8_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei16_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei16_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei16_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei16_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei32_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei32_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei32_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei32_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoandei64_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoandei64_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoandei64_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoandei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoandei64_v_u64m8_m(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
deleted file mode 100644
index 486d9ad732941..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomax.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei8_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei8_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei8_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei8_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei8_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei16_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei16_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei16_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei16_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei16_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei32_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei32_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei32_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei32_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei32_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei64_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei64_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei64_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei64_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei8_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei8_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei8_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei8_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei16_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei16_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei16_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei16_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei32_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei32_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei32_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei32_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei64_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei64_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei64_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei64_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei8_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei8_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei8_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei8_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei8_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei16_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei16_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei16_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei16_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei16_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei32_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei32_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei32_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei32_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei32_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei64_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei64_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei64_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei64_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei8_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei8_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei8_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei8_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei16_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei16_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei16_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei16_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei32_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei32_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei32_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei32_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei64_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei64_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei64_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei64_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei8_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei8_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei8_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei8_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei8_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei16_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei16_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei16_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei16_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei16_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei32_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei32_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei32_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei32_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamomaxei32_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamomaxei64_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamomaxei64_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamomaxei64_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamomaxei64_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei8_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei8_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei8_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei8_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei16_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei16_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei16_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei16_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei32_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei32_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei32_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei32_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamomaxei64_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamomaxei64_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamomaxei64_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamomaxei64_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei8_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei8_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei8_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei8_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei8_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei16_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei16_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei16_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei16_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei16_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei32_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei32_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei32_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei32_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamomaxuei32_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamomaxuei64_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamomaxuei64_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamomaxuei64_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamomaxuei64_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei8_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei8_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei8_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei8_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei16_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei16_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei16_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei16_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei32_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei32_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei32_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei32_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamomaxuei64_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamomaxuei64_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamomaxuei64_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamomaxuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamomaxuei64_v_u64m8_m(mask, base, bindex, value, vl);
-}
-

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
deleted file mode 100644
index b197cd89be8a0..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamomin.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei8_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei8_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei8_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei8_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei8_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei16_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei16_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei16_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei16_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei16_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei32_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei32_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei32_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei32_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei32_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei64_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei64_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei64_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei64_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei8_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei8_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei8_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei8_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei16_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei16_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei16_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei16_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei32_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei32_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei32_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei32_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei64_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei64_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei64_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei64_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei8_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei8_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei8_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei8_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei8_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei16_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei16_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei16_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei16_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei16_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei32_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei32_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei32_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei32_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei32_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei64_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei64_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei64_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei64_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei8_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei8_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei8_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei8_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei16_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei16_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei16_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei16_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei32_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei32_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei32_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei32_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei64_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei64_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei64_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei64_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei8_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei8_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei8_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei8_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei8_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei16_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei16_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei16_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei16_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei16_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei32_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei32_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei32_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei32_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamominei32_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamominei64_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamominei64_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamominei64_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamominei64_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei8_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei8_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei8_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei8_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei16_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei16_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei16_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei16_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei32_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei32_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei32_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei32_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamominei64_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamominei64_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamominei64_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamominei64_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei8_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei8_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei8_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei8_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei8_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei16_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei16_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei16_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei16_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei16_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei32_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei32_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei32_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei32_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamominuei32_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamominuei64_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamominuei64_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamominuei64_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamominuei64_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei8_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei8_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei8_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei8_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei16_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei16_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei16_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei16_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei32_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei32_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei32_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei32_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamominuei64_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamominuei64_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamominuei64_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamominuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamominuei64_v_u64m8_m(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
deleted file mode 100644
index 6ce8f94bcc07d..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoor.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei8_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei8_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei8_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei8_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei8_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei16_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei16_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei16_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei16_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei16_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei32_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei32_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei32_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei32_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei32_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei64_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei64_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei64_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei64_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei8_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei8_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei8_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei8_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei16_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei16_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei16_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei16_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei32_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei32_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei32_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei32_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei64_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei64_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei64_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei64_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei8_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei8_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei8_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei8_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei8_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei16_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei16_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei16_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei16_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei16_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei32_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei32_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei32_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei32_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei32_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei64_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei64_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei64_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei64_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei8_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei8_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei8_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei8_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei16_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei16_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei16_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei16_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei32_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei32_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei32_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei32_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei64_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei64_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei64_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei64_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei8_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei8_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei8_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei8_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei8_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei16_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei16_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei16_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei16_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei16_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei32_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei32_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei32_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei32_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoorei32_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoorei64_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoorei64_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoorei64_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoorei64_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei8_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei8_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei8_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei8_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei16_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei16_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei16_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei16_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei32_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei32_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei32_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei32_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoorei64_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoorei64_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoorei64_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoorei64_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei8_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei8_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei8_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei8_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei8_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei16_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei16_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei16_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei16_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei16_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei32_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei32_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei32_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei32_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoorei32_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoorei64_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoorei64_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoorei64_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoorei64_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei8_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei8_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei8_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei8_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei16_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei16_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei16_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei16_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei32_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei32_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei32_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei32_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoorei64_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoorei64_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoorei64_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoorei64_v_u64m8_m(mask, base, bindex, value, vl);
-}
-

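Every test deleted from vamoswap.c below follows the same call shape: a scalar base pointer, a vector of indices, a vector of swap values, and a vector length. A minimal sketch of that shape, mirroring test_vamoswapei8_v_i32m1 from the removed file — the helper name amo_swap_example is illustrative only, and this compiles only against a pre-1.0 toolchain that still ships the experimental Zvamo intrinsics this commit removes:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch only: assumes the experimental Zvamo intrinsics are still present.
vint32m1_t amo_swap_example(int32_t *base, vuint8mf4_t bindex,
                            vint32m1_t value, size_t vl) {
  // base   - scalar pointer to the memory region operated on atomically
  // bindex - vector of 8-bit offsets selecting elements relative to base
  // value  - vector of values swapped into memory; the old values are returned
  // vl     - number of active elements
  return vamoswapei8_v_i32m1(base, bindex, value, vl);
}

The masked variants seen earlier in the diff take an additional vboolN_t mask as the first argument but are otherwise identical in shape.
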
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
deleted file mode 100644
index 8c40d513c74c0..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoswap.c
+++ /dev/null
@@ -1,2107 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN:   -target-feature +experimental-zfh -target-feature +experimental-zvamo -disable-O0-optnone -fallow-half-arguments-and-returns -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei8_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei8_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei8_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei8_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoswapei8_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei16_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei16_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei16_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei16_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoswapei16_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei32_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei32_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei32_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei32_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoswapei32_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei64_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei64_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei64_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei64_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei8_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei8_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei8_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei8_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei16_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei16_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei16_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei16_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei32_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei32_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei32_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei32_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei64_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei64_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei64_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei64_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei8_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei8_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei8_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei8_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoswapei8_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei16_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei16_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei16_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei16_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoswapei16_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei32_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei32_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei32_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei32_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoswapei32_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei64_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei64_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei64_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei64_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei8_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei8_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei8_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei8_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei16_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei16_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei16_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei16_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei32_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei32_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei32_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei32_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei64_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei64_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei64_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei64_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei8_v_f32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei8_v_f32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei8_v_f32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei8_v_f32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
-  return vamoswapei8_v_f32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei16_v_f32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei16_v_f32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei16_v_f32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei16_v_f32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
-  return vamoswapei16_v_f32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei32_v_f32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei32_v_f32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei32_v_f32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei32_v_f32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
-  return vamoswapei32_v_f32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei64_v_f32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei64_v_f32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei64_v_f32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei64_v_f32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei8_v_f64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei8_v_f64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei8_v_f64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei8_v_f64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei16_v_f64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei16_v_f64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei16_v_f64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei16_v_f64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei32_v_f64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei32_v_f64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei32_v_f64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei32_v_f64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei64_v_f64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei64_v_f64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei64_v_f64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei64_v_f64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei8_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei8_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei8_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei8_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoswapei8_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei16_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei16_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei16_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei16_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoswapei16_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei32_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei32_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei32_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei32_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoswapei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoswapei32_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoswapei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoswapei64_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoswapei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoswapei64_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoswapei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoswapei64_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoswapei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoswapei64_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei8_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei8_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei8_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei8_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei16_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei16_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei16_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei16_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei32_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei32_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei32_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei32_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoswapei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoswapei64_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoswapei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoswapei64_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoswapei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoswapei64_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoswapei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoswapei64_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei8_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei8_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei8_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei8_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoswapei8_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei16_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei16_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei16_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei16_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoswapei16_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei32_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei32_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei32_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei32_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoswapei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoswapei32_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoswapei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoswapei64_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoswapei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoswapei64_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoswapei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoswapei64_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoswapei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoswapei64_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei8_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei8_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei8_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei8_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei16_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei16_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei16_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei16_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei32_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei32_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei32_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei32_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoswapei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoswapei64_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoswapei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoswapei64_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoswapei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoswapei64_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoswapei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoswapei64_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei8_v_f32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei8_v_f32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei8_v_f32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei8_v_f32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
-  return vamoswapei8_v_f32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei16_v_f32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei16_v_f32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei16_v_f32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei16_v_f32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
-  return vamoswapei16_v_f32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei32_v_f32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei32_v_f32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei32_v_f32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei32_v_f32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
-//
-vfloat32m8_t test_vamoswapei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
-  return vamoswapei32_v_f32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
-//
-vfloat32mf2_t test_vamoswapei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
-  return vamoswapei64_v_f32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
-//
-vfloat32m1_t test_vamoswapei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
-  return vamoswapei64_v_f32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
-//
-vfloat32m2_t test_vamoswapei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
-  return vamoswapei64_v_f32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
-//
-vfloat32m4_t test_vamoswapei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
-  return vamoswapei64_v_f32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei8_v_f64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei8_v_f64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei8_v_f64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei8_v_f64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei16_v_f64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei16_v_f64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei16_v_f64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei16_v_f64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei32_v_f64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei32_v_f64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei32_v_f64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei32_v_f64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
-//
-vfloat64m1_t test_vamoswapei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
-  return vamoswapei64_v_f64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
-//
-vfloat64m2_t test_vamoswapei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
-  return vamoswapei64_v_f64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
-//
-vfloat64m4_t test_vamoswapei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
-  return vamoswapei64_v_f64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
-//
-vfloat64m8_t test_vamoswapei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
-  return vamoswapei64_v_f64m8_m(mask, base, bindex, value, vl);
-}
-

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
deleted file mode 100644
index cc5d15e7c9dc0..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vamoxor.c
+++ /dev/null
@@ -1,1406 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -target-feature +experimental-zvamo -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei8_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei8_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei8_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei8_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei8_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei16_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei16_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei16_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei16_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei16_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei32_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei32_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei32_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei32_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei32_v_i32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei64_v_i32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei64_v_i32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei64_v_i32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei64_v_i32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei8_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei8_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei8_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei8_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei16_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei16_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei16_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei16_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei32_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei32_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei32_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei32_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei64_v_i64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei64_v_i64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei64_v_i64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei64_v_i64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei8_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei8_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei8_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei8_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei8_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei16_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei16_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei16_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei16_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei16_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei32_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei32_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei32_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei32_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei32_v_u32m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei64_v_u32mf2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei64_v_u32m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei64_v_u32m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei64_v_u32m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei8_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei8_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei8_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei8_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei16_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei16_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei16_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei16_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei32_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei32_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei32_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei32_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei64_v_u64m1(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei64_v_u64m2(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei64_v_u64m4(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei64_v_u64m8(base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei8_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei8_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei8_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei8_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei8_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei16_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei16_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei16_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei16_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei16_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei32_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei32_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei32_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei32_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
-  return vamoxorei32_v_i32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
-  return vamoxorei64_v_i32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
-  return vamoxorei64_v_i32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
-  return vamoxorei64_v_i32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
-  return vamoxorei64_v_i32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei8_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei8_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei8_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei8_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei16_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei16_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei16_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei16_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei32_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei32_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei32_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei32_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
-  return vamoxorei64_v_i64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
-  return vamoxorei64_v_i64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
-  return vamoxorei64_v_i64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
-  return vamoxorei64_v_i64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei8_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei8_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei8_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei8_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei8_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei16_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei16_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei16_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei16_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei16_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei32_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei32_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei32_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei32_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
-//
-vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
-  return vamoxorei32_v_u32m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
-//
-vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
-  return vamoxorei64_v_u32mf2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
-//
-vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
-  return vamoxorei64_v_u32m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
-//
-vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
-  return vamoxorei64_v_u32m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
-//
-vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
-  return vamoxorei64_v_u32m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei8_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei8_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei8_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei8_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei16_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei16_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei16_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei16_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei32_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei32_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei32_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei32_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
-//
-vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
-  return vamoxorei64_v_u64m1_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
-//
-vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
-  return vamoxorei64_v_u64m2_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
-//
-vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
-  return vamoxorei64_v_u64m4_m(mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
-//
-vuint64m8_t test_vamoxorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
-  return vamoxorei64_v_u64m8_m(mask, base, bindex, value, vl);
-}
-
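For orientation while skimming the deleted tests: the sketch below is a hypothetical wrapper, not part of the commit, showing how one of these masked vamoxor intrinsics was invoked from user code. The intrinsic name and argument types are taken verbatim from test_vamoxorei16_v_u64m1_m above; the wrapper name xor_indexed is invented. It only builds against a pre-1.0 RVV toolchain that still ships the Zvamo builtins this commit removes.

// Hypothetical usage sketch of the removed masked vamoxor intrinsic,
// mirroring test_vamoxorei16_v_u64m1_m from the deleted test above.
// Requires a pre-1.0 RVV toolchain with Zvamo builtins; it will not
// compile after this commit lands.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

vuint64m1_t xor_indexed(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex,
                        vuint64m1_t value, size_t vl) {
  // For each active lane i, atomically XOR value[i] into the memory
  // element addressed via base and bindex[i], returning the prior
  // memory contents in the result vector.
  return vamoxorei16_v_u64m1_m(mask, base, bindex, value, vl);
}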

diff  --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 26aecb6fd961e..4855cad529005 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -140,8 +140,7 @@ enum RISCVExtension : uint8_t {
   F = 1 << 1,
   D = 1 << 2,
   Zfh = 1 << 3,
-  Zvamo = 1 << 4,
-  Zvlsseg = 1 << 5,
+  Zvlsseg = 1 << 4,
 };
 
 // TODO refactor RVVIntrinsic class design after support all intrinsic
@@ -787,8 +786,6 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
     else if (T->isFloatVector(64) || T->isFloat(64))
       RISCVExtensions |= RISCVExtension::D;
   }
-  if (RequiredExtension == "Zvamo")
-    RISCVExtensions |= RISCVExtension::Zvamo;
   if (RequiredExtension == "Zvlsseg")
     RISCVExtensions |= RISCVExtension::Zvlsseg;
 
@@ -1256,8 +1253,6 @@ bool RVVEmitter::emitExtDefStr(uint8_t Extents, raw_ostream &OS) {
     OS << LS << "defined(__riscv_d)";
   if (Extents & RISCVExtension::Zfh)
     OS << LS << "defined(__riscv_zfh)";
-  if (Extents & RISCVExtension::Zvamo)
-    OS << LS << "defined(__riscv_zvamo)";
   if (Extents & RISCVExtension::Zvlsseg)
     OS << LS << "defined(__riscv_zvlsseg)";
   OS << "\n";
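To make the RISCVVEmitter.cpp hunks concrete: the stand-alone sketch below illustrates the bitmask-to-preprocessor-guard scheme that the enum renumbering and the emitExtDefStr change touch. Only the enum bit names and values come from the diff; the print_guard helper, the " && " separator, and the elided bit 0 are assumptions for illustration, not the emitter's actual code.

// Sketch of the extension-guard emission pattern. With Zvamo gone,
// Zvlsseg moves down to bit 4 and no __riscv_zvamo guard is emitted.
#include <stdint.h>
#include <stdio.h>

enum RISCVExtension {  // bit 0 elided here for brevity
  F       = 1 << 1,
  D       = 1 << 2,
  Zfh     = 1 << 3,
  Zvlsseg = 1 << 4,    // renumbered from 1 << 5 now that Zvamo's bit is gone
};

// Hypothetical stand-in for RVVEmitter::emitExtDefStr: print the
// preprocessor condition guarding an intrinsic's header declaration.
static void print_guard(uint8_t exts) {
  const char *sep = "";
  printf("#if ");
  if (exts & F)       { printf("%sdefined(__riscv_f)", sep);       sep = " && "; }
  if (exts & D)       { printf("%sdefined(__riscv_d)", sep);       sep = " && "; }
  if (exts & Zfh)     { printf("%sdefined(__riscv_zfh)", sep);     sep = " && "; }
  if (exts & Zvlsseg) { printf("%sdefined(__riscv_zvlsseg)", sep); sep = " && "; }
  printf("\n");
}

int main(void) {
  print_guard(D | Zvlsseg);  // e.g. a segment load/store of f64 elements
  return 0;
}

Running this prints "#if defined(__riscv_d) && defined(__riscv_zvlsseg)", the general shape of the guards the emitter writes into the generated header; after this commit no intrinsic can produce a __riscv_zvamo condition.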


        

